From 6b35e57c434006884c10febcd85f8e77b66ae13d Mon Sep 17 00:00:00 2001
From: Jonathan Rajotte
Date: Thu, 22 Nov 2018 18:56:52 -0500
Subject: [PATCH] LAVA: Upload results to obj.internal.efficios.com

This is in preparation for the upgrade to LAVA 2. LAVA 2 does not support
the lava-test-case-attach feature, so test artifacts are now uploaded to
the object store instead of being attached to the LAVA result bundle.

Signed-off-by: Jonathan Rajotte
---
 lava/system-tests/destructive-tests.yml       | 15 +++-
 lava/system-tests/failing-close.yml           | 19 +++--
 lava/system-tests/failing-ioctl.yml           | 19 +++--
 lava/system-tests/failing-open-efault.yml     | 19 +++--
 lava/system-tests/failing-open-enoent.yml     | 19 +++--
 lava/system-tests/kernel-tests.yml            | 16 +++-
 lava/system-tests/lttng-test-filter.yml       | 19 +++--
 lava/system-tests/perf-tests.yml              | 14 +++-
 lava/system-tests/raw-syscall-getpid.yml      | 17 ++---
 lava/system-tests/success-dup-close.yml       | 18 ++---
 lava/upload_artifact.sh                       | 37 ++++++++++
 scripts/system-tests/lava-submit.py           | 73 +++++++++----------
 scripts/system-tests/lava2-submit.py          |  2 +
 .../system-tests/run-baremetal-benchmarks.sh  |  1 +
 scripts/system-tests/run-baremetal-tests.sh   |  1 +
 scripts/system-tests/run-kvm-fuzzing-tests.sh |  1 +
 scripts/system-tests/run-kvm-tests.sh         |  1 +
 scripts/system-tests/template_lava_job.jinja2 | 20 +++++
 18 files changed, 193 insertions(+), 118 deletions(-)
 create mode 100755 lava/upload_artifact.sh

diff --git a/lava/system-tests/destructive-tests.yml b/lava/system-tests/destructive-tests.yml
index 32150e0..a5668eb 100644
--- a/lava/system-tests/destructive-tests.yml
+++ b/lava/system-tests/destructive-tests.yml
@@ -3,22 +3,29 @@ metadata:
   name: lttng-destructive-tests
   description: "Run root destructive test suite"
 install:
+  deps:
+    - curl
+  git-repos:
+    - url: https://github.com/lttng/lttng-ci
+      destination: ci
+      branch: master
   steps:
     - export TMPDIR="/tmp"
-    - cd
     - systemctl stop systemd-timesyncd.service
     - ulimit -c unlimited
     - mkdir -p coredump
     - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
+params:
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 run:
   steps:
     - source /root/lttngvenv/activate
-    - cd /root/lttngvenv/src/lttng-tools
+    - pushd /root/lttngvenv/src/lttng-tools
    - lava-test-case build-test-suite --shell "make"
     - export LTTNG_ENABLE_DESTRUCTIVE_TESTS="will-break-my-system"
     - cd tests
     #Need to check if the file is present for branches where the testcase was not backported
     - lava-test-case run-tests --shell "if [ -e root_destructive_tests ]; then prove --verbose --merge --exec '' - < root_destructive_tests; else echo 'root_destructive_tests not found'; fi"
-    - cd
+    - popd
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-tests coredump.tar.gz
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/coredump.tar.gz"
diff --git a/lava/system-tests/failing-close.yml b/lava/system-tests/failing-close.yml
index f326a9f..6270669 100644
--- a/lava/system-tests/failing-close.yml
+++ b/lava/system-tests/failing-close.yml
@@ -3,12 +3,13 @@ metadata:
   name: benchmark-syscall-failing-close
   description: "Perform syscall tracing benchmark of failing close"
 params:
-  JENKINS_JOBNAME: "default jobname"
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 install:
   deps:
     - python3-pandas
     - python3-numpy
+    - curl
   git-repos:
     - url: https://github.com/lttng/lttng-ci
       destination: ci
       branch: master
@@ -23,15 +24,13 @@ run:
     - source /root/lttngvenv/activate
     - export BENCHMARK_DIR=$(mktemp --directory)/bm
     - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - cd $BENCHMARK_DIR
+    - pushd $BENCHMARK_DIR
     - lava-test-case build-benchmarks --shell "make"
     - lava-test-case run-benchmarks --shell "./run.sh failing-close sys_close"
-    - lava-test-case-attach run-benchmarks "./results.csv"
-    - cd -
-    - cd ci
-    - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ../processed_results_close.csv
-    - cd -
+    - popd
+    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+    - mv ./processed_results.csv ./processed_results_close.csv
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-benchmarks coredump.tar.gz
-    - lava-test-case-attach run-benchmarks "./processed_results_close.csv"
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_close.csv"
+    - ./ci/lava/upload_artifact.sh processed_results_close.csv "results/${JENKINS_BUILD_ID}/processed_results_close.csv"
diff --git a/lava/system-tests/failing-ioctl.yml b/lava/system-tests/failing-ioctl.yml
index 0ea2872..695999d 100644
--- a/lava/system-tests/failing-ioctl.yml
+++ b/lava/system-tests/failing-ioctl.yml
@@ -3,12 +3,13 @@ metadata:
   name: benchmark-syscall-failing-ioctl
   description: "Perform syscall tracing benchmark of failing ioctl"
 params:
-  JENKINS_JOBNAME: "default jobname"
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 install:
   deps:
     - python3-pandas
     - python3-numpy
+    - curl
   git-repos:
     - url: https://github.com/lttng/lttng-ci
       destination: ci
       branch: master
@@ -23,15 +24,13 @@ run:
     - source /root/lttngvenv/activate
     - export BENCHMARK_DIR=$(mktemp --directory)/bm
     - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - cd $BENCHMARK_DIR
+    - pushd $BENCHMARK_DIR
     - lava-test-case build-benchmarks --shell "make"
     - lava-test-case run-benchmarks --shell "./run.sh failing-ioctl sys_ioctl"
-    - lava-test-case-attach run-benchmarks "./results.csv"
-    - cd -
-    - cd ci
-    - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ../processed_results_ioctl.csv
-    - cd -
+    - popd
+    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+    - mv ./processed_results.csv ./processed_results_ioctl.csv
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-benchmarks coredump.tar.gz
-    - lava-test-case-attach run-benchmarks "./processed_results_ioctl.csv"
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_ioctl.csv"
+    - ./ci/lava/upload_artifact.sh processed_results_ioctl.csv "results/${JENKINS_BUILD_ID}/processed_results_ioctl.csv"
diff --git a/lava/system-tests/failing-open-efault.yml b/lava/system-tests/failing-open-efault.yml
index 790ae36..2235dc7 100644
--- a/lava/system-tests/failing-open-efault.yml
+++ b/lava/system-tests/failing-open-efault.yml
@@ -3,12 +3,13 @@ metadata:
   name: benchmark-syscall-failing-open-efault
   description: "Perform syscall tracing benchmark of failing open-efault"
 params:
-  JENKINS_JOBNAME: "default jobname"
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 install:
   deps:
     - python3-pandas
     - python3-numpy
+    - curl
   git-repos:
     - url: https://github.com/lttng/lttng-ci
       destination: ci
       branch: master
@@ -23,15 +24,13 @@ run:
     - source /root/lttngvenv/activate
     - export BENCHMARK_DIR=$(mktemp --directory)/bm
     - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - cd $BENCHMARK_DIR
+    - pushd $BENCHMARK_DIR
     - lava-test-case build-benchmarks --shell "make"
     - lava-test-case run-benchmarks --shell "./run.sh failing-open-efault sys_open"
-    - lava-test-case-attach run-benchmarks "./results.csv"
-    - cd -
-    - cd ci
-    - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ../processed_results_open_efault.csv
-    - cd -
+    - popd
+    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+    - mv ./processed_results.csv ./processed_results_open_efault.csv
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-benchmarks coredump.tar.gz
-    - lava-test-case-attach run-benchmarks "./processed_results_open_efault.csv"
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_open_efault.csv"
+    - ./ci/lava/upload_artifact.sh processed_results_open_efault.csv "results/${JENKINS_BUILD_ID}/processed_results_open_efault.csv"
diff --git a/lava/system-tests/failing-open-enoent.yml b/lava/system-tests/failing-open-enoent.yml
index 34c4dac..5eee241 100644
--- a/lava/system-tests/failing-open-enoent.yml
+++ b/lava/system-tests/failing-open-enoent.yml
@@ -3,12 +3,13 @@ metadata:
   name: benchmark-syscall-failing-open-enoent
   description: "Perform syscall tracing benchmark of failing open-enoent"
 params:
-  JENKINS_JOBNAME: "default jobname"
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 install:
   deps:
     - python3-pandas
     - python3-numpy
+    - curl
   git-repos:
     - url: https://github.com/lttng/lttng-ci
       destination: ci
       branch: master
@@ -23,15 +24,13 @@ run:
     - source /root/lttngvenv/activate
     - export BENCHMARK_DIR=$(mktemp --directory)/bm
     - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - cd $BENCHMARK_DIR
+    - pushd $BENCHMARK_DIR
     - lava-test-case build-benchmarks --shell "make"
     - lava-test-case run-benchmarks --shell "./run.sh failing-open-enoent sys_open"
-    - lava-test-case-attach run-benchmarks "./results.csv"
-    - cd -
-    - cd ci
-    - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ../processed_results_open_enoent.csv
-    - cd -
+    - popd
+    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+    - mv ./processed_results.csv ./processed_results_open_enoent.csv
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-benchmarks coredump.tar.gz
-    - lava-test-case-attach run-benchmarks "./processed_results_open_enoent.csv"
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_open_enoent.csv"
+    - ./ci/lava/upload_artifact.sh processed_results_open_enoent.csv "results/${JENKINS_BUILD_ID}/processed_results_open_enoent.csv"
diff --git a/lava/system-tests/kernel-tests.yml b/lava/system-tests/kernel-tests.yml
index a5db623..5c7c67c 100644
--- a/lava/system-tests/kernel-tests.yml
+++ b/lava/system-tests/kernel-tests.yml
@@ -2,20 +2,28 @@ metadata:
   format: Lava-Test Test Definition 1.0
   name: lttng-kernel-test
   description: "Run kernel test suite"
+params:
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
+
 install:
+  deps:
+    - curl
+  git-repos:
+    - url: https://github.com/lttng/lttng-ci
+      destination: ci
+      branch: master
   steps:
     - export TMPDIR="/tmp"
-    - cd
     - ulimit -c unlimited
     - mkdir -p coredump
     - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
 run:
   steps:
     - source /root/lttngvenv/activate
-    - cd /root/lttngvenv/src/lttng-tools
+    - pushd /root/lttngvenv/src/lttng-tools
     - lava-test-case build-test-suite --shell "make"
     - cd tests
     - lava-test-case run-tests --shell "prove --verbose --merge --exec '' - < root_regression"
-    - cd
+    - popd
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-tests coredump.tar.gz
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
diff --git a/lava/system-tests/lttng-test-filter.yml b/lava/system-tests/lttng-test-filter.yml
index a8f288d..b952a2b 100644
--- a/lava/system-tests/lttng-test-filter.yml
+++ b/lava/system-tests/lttng-test-filter.yml
@@ -3,12 +3,13 @@ metadata:
   name: benchmark-lttng-test-filter
   description: "Perform syscall tracing benchmark of the lttng-test-filter"
 params:
-  JENKINS_JOBNAME: "default jobname"
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 install:
   deps:
     - python3-pandas
     - python3-numpy
+    - curl
   git-repos:
     - url: https://github.com/lttng/lttng-ci
       destination: ci
       branch: master
@@ -23,16 +24,14 @@ run:
     - source /root/lttngvenv/activate
     - export BENCHMARK_DIR=$(mktemp --directory)/bm
     - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - cd $BENCHMARK_DIR
+    - pushd $BENCHMARK_DIR
     - modprobe lttng-test
     - lava-test-case build-benchmarks --shell "make"
     - lava-test-case run-benchmarks --shell "./run.sh lttng-test-filter lttng_test_filter_event"
-    - lava-test-case-attach run-benchmarks "./results.csv"
-    - cd -
-    - cd ci
-    - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ../processed_results_lttng_test_filter.csv
-    - cd -
+    - popd
+    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+    - mv ./processed_results.csv ./processed_results_lttng_test_filter.csv
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-benchmarks coredump.tar.gz
-    - lava-test-case-attach run-benchmarks "./processed_results_lttng_test_filter.csv"
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_lttng_test_filter.csv"
+    - ./ci/lava/upload_artifact.sh processed_results_lttng_test_filter.csv "results/${JENKINS_BUILD_ID}/processed_results_lttng_test_filter.csv"
diff --git a/lava/system-tests/perf-tests.yml b/lava/system-tests/perf-tests.yml
index c03892b..33110a7 100644
--- a/lava/system-tests/perf-tests.yml
+++ b/lava/system-tests/perf-tests.yml
@@ -2,23 +2,29 @@ metadata:
   format: Lava-Test Test Definition 1.0
   name: lttng-perf-tests
   description: "Run perf regression test suite"
+params:
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 install:
   deps:
     - libpfm4-dev
+    - curl
+  git-repos:
+    - url: https://github.com/lttng/lttng-ci
+      destination: ci
+      branch: master
   steps:
     - export TMPDIR="/tmp"
-    - cd
     - ulimit -c unlimited
     - mkdir -p coredump
     - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
 run:
   steps:
     - source /root/lttngvenv/activate
-    - cd /root/lttngvenv/src/lttng-tools
+    - pushd /root/lttngvenv/src/lttng-tools
     - lava-test-case build-test-suite --shell "make"
     - cd tests
     #Need to check if the file is present for branches where the testcase was not backported
     - lava-test-case run-tests --shell "if [ -e perf_regression ]; then prove --verbose --merge --exec '' - < perf_regression; else echo 'perf_regression not found'; fi"
-    - cd
+    - popd
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-tests coredump.tar.gz
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
diff --git a/lava/system-tests/raw-syscall-getpid.yml b/lava/system-tests/raw-syscall-getpid.yml
index f4da5cd..2342aa0 100644
--- a/lava/system-tests/raw-syscall-getpid.yml
+++ b/lava/system-tests/raw-syscall-getpid.yml
@@ -3,7 +3,7 @@ metadata:
   name: benchmark-raw-syscall-getpid
   description: "Perform syscall tracing benchmark of the raw syscall getpid"
 params:
-  JENKINS_JOBNAME: "default jobname"
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 
 install:
   deps:
@@ -23,15 +23,14 @@ run:
     - source /root/lttngvenv/activate
     - export BENCHMARK_DIR=$(mktemp --directory)/bm
     - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - cd $BENCHMARK_DIR
+    - pushd $BENCHMARK_DIR
     - lava-test-case build-benchmarks --shell "make"
     - lava-test-case run-benchmarks --shell "./run.sh raw-syscall-getpid sys_getpid"
     - lava-test-case-attach run-benchmarks "./results.csv"
-    - cd -
-    - cd ci
-    - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ../processed_results_raw_syscall_getpid.csv
-    - cd -
+    - popd
+    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+    - mv ./processed_results.csv ./processed_results_raw_syscall_getpid.csv
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-benchmarks coredump.tar.gz
-    - lava-test-case-attach run-benchmarks "./processed_results_raw_syscall_getpid.csv"
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_raw_syscall_getpid.csv"
+    - ./ci/lava/upload_artifact.sh processed_results_raw_syscall_getpid.csv "results/${JENKINS_BUILD_ID}/processed_results_raw_syscall_getpid.csv"
diff --git a/lava/system-tests/success-dup-close.yml b/lava/system-tests/success-dup-close.yml
index 0443984..08ae82f 100644
--- a/lava/system-tests/success-dup-close.yml
+++ b/lava/system-tests/success-dup-close.yml
@@ -3,12 +3,13 @@ metadata:
   name: benchmark-syscall-success-dup-close
   description: "Perform syscall tracing benchmark of successful dup and close"
 params:
-  JENKINS_JOBNAME: "default jobname"
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 install:
   deps:
     - python3-pandas
     - python3-numpy
+    - curl
   git-repos:
     - url: https://github.com/lttng/lttng-ci
       destination: ci
       branch: master
@@ -23,15 +24,13 @@ run:
     - source /root/lttngvenv/activate
     - export BENCHMARK_DIR=$(mktemp --directory)/bm
     - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - cd $BENCHMARK_DIR
+    - pushd $BENCHMARK_DIR
     - lava-test-case build-benchmarks --shell "make"
     - lava-test-case run-benchmarks --shell "./run.sh success-dup-close sys_close,sys_dup"
     - lava-test-case-attach run-benchmarks "./results.csv"
-    - cd -
-    - cd ci
-    - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ../processed_results_dup_close.csv
-    - cd -
+    - popd
+    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+    - mv ./processed_results.csv ./processed_results_dup_close.csv
     - tar czf coredump.tar.gz coredump
-    - lava-test-case-attach run-benchmarks coredump.tar.gz
-    - lava-test-case-attach run-benchmarks "./processed_results_dup_close.csv"
+    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_dup_close.csv"
+    - ./ci/lava/upload_artifact.sh processed_results_dup_close.csv "results/${JENKINS_BUILD_ID}/processed_results_dup_close.csv"
diff --git a/lava/upload_artifact.sh b/lava/upload_artifact.sh
new file mode 100755
index 0000000..333593b
--- /dev/null
+++ b/lava/upload_artifact.sh
@@ -0,0 +1,37 @@
+#!/bin/bash -eux
+# Copyright (C) 2018 - Jonathan Rajotte-Julien
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+bucket=lava
+file=$1
+# Path must include the file name
+path=$2
+
+host=obj.internal.efficios.com
+s3_k='jenkins'
+s3_s='echo123456'
+
+resource="/${bucket}/${path}"
+content_type="application/octet-stream"
+date=`date -R`
+_signature="PUT\n\n${content_type}\n${date}\n${resource}"
+signature=`echo -en ${_signature} | openssl sha1 -hmac ${s3_s} -binary | base64`
+
+curl -v -X PUT -T "${file}" \
+    -H "Host: $host" \
+    -H "Date: ${date}" \
+    -H "Content-Type: ${content_type}" \
+    -H "Authorization: AWS ${s3_k}:${signature}" \
+    https://$host${resource}
diff --git a/scripts/system-tests/lava-submit.py b/scripts/system-tests/lava-submit.py
index cb1f779..3cf1131 100644
--- a/scripts/system-tests/lava-submit.py
+++ b/scripts/system-tests/lava-submit.py
@@ -22,12 +22,15 @@ import random
 import sys
 import time
 import xmlrpc.client
+from urllib.parse import urljoin
+from urllib.request import urlretrieve
 from collections import OrderedDict
 from enum import Enum
 
 USERNAME = 'frdeso'
 HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
+OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
 
 class TestType(Enum):
     baremetal_benchmarks=1
@@ -71,8 +74,7 @@ def check_job_all_test_cases_state_count(server, job):
 
 # Get the benchmark results from the lava bundle
 # save them as CSV files localy
-def fetch_benchmark_results(server, job):
-    content = get_job_bundle_content(server, job)
+def fetch_benchmark_results(build_id):
     testcases = ['processed_results_close.csv',
                  'processed_results_ioctl.csv',
                  'processed_results_open_efault.csv',
@@ -80,25 +82,9 @@ def fetch_benchmark_results(server, job):
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']
-
-    # The result bundle is a large JSON containing the results of every testcase
-    # of the LAVA job as well as the files that were attached during the run.
-    # We need to iterate over this JSON to get the base64 representation of the
-    # benchmark results produced during the run.
-    for run in content['test_runs']:
-        # We only care of the benchmark testcases
-        if 'benchmark-' in run['test_id']:
-            if 'test_results' in run:
-                for res in run['test_results']:
-                    if 'attachments' in res:
-                        for a in res['attachments']:
-                            # We only save the results file
-                            if a['pathname'] in testcases:
-                                with open(a['pathname'],'wb') as f:
-                                    # Convert the b64 representation of the
-                                    # result file and write it to a file
-                                    # in the current working directory
-                                    f.write(base64.b64decode(a['content']))
+    for testcase in testcases:
+        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
+        urlretrieve(url, testcase)
 
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
@@ -186,7 +172,7 @@ def get_config_cmd(build_device):
         ])
     return command
 
-def get_baremetal_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -194,37 +180,44 @@ def get_baremetal_benchmarks_cmd():
             'testdef_repos': [
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/failing-close.yml'
+                   'testdef': 'lava/system-tests/failing-close.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/failing-ioctl.yml'
+                   'testdef': 'lava/system-tests/failing-ioctl.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/failing-open-efault.yml'
+                   'testdef': 'lava/system-tests/failing-open-efault.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/success-dup-close.yml'
+                   'testdef': 'lava/system-tests/success-dup-close.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/raw-syscall-getpid.yml'
+                   'testdef': 'lava/system-tests/raw-syscall-getpid.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/failing-open-enoent.yml'
+                   'testdef': 'lava/system-tests/failing-open-enoent.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/lttng-test-filter.yml'
+                   'testdef': 'lava/system-tests/lttng-test-filter.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
             ],
             'timeout': 7200
@@ -232,7 +225,7 @@ def get_baremetal_benchmarks_cmd():
         })
    return command
 
-def get_baremetal_tests_cmd():
+def get_baremetal_tests_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
             'testdef_repos': [
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/perf-tests.yml'
+                   'testdef': 'lava/system-tests/perf-tests.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
             ],
             'timeout': 3600
@@ -248,7 +242,7 @@ def get_baremetal_tests_cmd():
         })
     return command
 
-def get_kvm_tests_cmd():
+def get_kvm_tests_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
             'testdef_repos': [
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/kernel-tests.yml'
+                   'testdef': 'lava/system-tests/kernel-tests.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
-                   'testdef': 'lava/system-tests/destructive-tests.yml'
+                   'testdef': 'lava/system-tests/destructive-tests.yml',
+                   'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
             ],
             'timeout': 7200
@@ -404,6 +400,7 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
+    parser.add_argument('-id', '--build-id', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
 
     args = parser.parse_args()
@@ -441,7 +438,7 @@ def main():
     if test_type is TestType.baremetal_benchmarks:
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
-        j['actions'].append(get_baremetal_benchmarks_cmd())
+        j['actions'].append(get_baremetal_benchmarks_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
     elif test_type is TestType.baremetal_tests:
         if args.ust_commit is None:
@@ -449,7 +446,7 @@ def main():
             return -1
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_baremetal_tests_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     elif test_type is TestType.kvm_tests:
         if args.ust_commit is None:
@@ -457,7 +454,7 @@ def main():
             return -1
         j['actions'].append(get_config_cmd('kvm'))
         j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_kvm_tests_cmd())
+        j['actions'].append(get_kvm_tests_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     elif test_type is TestType.kvm_fuzzing_tests:
         if args.ust_commit is None:
@@ -491,7 +488,7 @@ def main():
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
     elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(server, jobid)
+        fetch_benchmark_results(args.build_id)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
diff --git a/scripts/system-tests/lava2-submit.py b/scripts/system-tests/lava2-submit.py
index 47cc610..2ad6512 100644
--- a/scripts/system-tests/lava2-submit.py
+++ b/scripts/system-tests/lava2-submit.py
@@ -191,6 +191,7 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
+    parser.add_argument('-id', '--build-id', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     parser.add_argument('-d', '--debug', required=False, action='store_true')
     args = parser.parse_args()
@@ -246,6 +247,7 @@ def main():
     context['kernel_url'] = args.kernel
     context['nfsrootfs_url'] = nfsrootfs
     context['lttng_modules_url'] = args.lmodule
+    context['jenkins_build_id'] = args.build_id
 
     context['kprobe_round_nb'] = 10
 
diff --git a/scripts/system-tests/run-baremetal-benchmarks.sh b/scripts/system-tests/run-baremetal-benchmarks.sh
index 33378a4..437ea2a 100644
--- a/scripts/system-tests/run-baremetal-benchmarks.sh
+++ b/scripts/system-tests/run-baremetal-benchmarks.sh
@@ -30,6 +30,7 @@ python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
   -k "$S3_URL_KERNEL_IMAGE" \
   -lm "$S3_URL_LTTNG_MODULES" \
   -tc "$LTTNG_TOOLS_COMMIT_ID" \
+  -id "$BUILD_TAG" \
   --debug
 
 python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
diff --git a/scripts/system-tests/run-baremetal-tests.sh b/scripts/system-tests/run-baremetal-tests.sh
index b359c8e..6d652e3 100644
--- a/scripts/system-tests/run-baremetal-tests.sh
+++ b/scripts/system-tests/run-baremetal-tests.sh
@@ -32,6 +32,7 @@ python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
   -lm "$S3_URL_LTTNG_MODULES" \
   -tc "$LTTNG_TOOLS_COMMIT_ID" \
   -uc "$LTTNG_UST_COMMIT_ID" \
+  -id "$BUILD_TAG" \
   --debug
 
 python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
diff --git a/scripts/system-tests/run-kvm-fuzzing-tests.sh b/scripts/system-tests/run-kvm-fuzzing-tests.sh
index ae1cb36..be846af 100644
--- a/scripts/system-tests/run-kvm-fuzzing-tests.sh
+++ b/scripts/system-tests/run-kvm-fuzzing-tests.sh
@@ -31,6 +31,7 @@ python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
   -lm "$S3_URL_LTTNG_MODULES" \
   -tc "$LTTNG_TOOLS_COMMIT_ID" \
   -uc "$LTTNG_UST_COMMIT_ID" \
+  -id "$BUILD_TAG" \
   --debug
 
 python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
diff --git a/scripts/system-tests/run-kvm-tests.sh b/scripts/system-tests/run-kvm-tests.sh
index 7de8034..8b9722b 100644
--- a/scripts/system-tests/run-kvm-tests.sh
+++ b/scripts/system-tests/run-kvm-tests.sh
@@ -31,6 +31,7 @@ python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
   -lm "$S3_URL_LTTNG_MODULES" \
   -tc "$LTTNG_TOOLS_COMMIT_ID" \
   -uc "$LTTNG_UST_COMMIT_ID" \
+  -id "$BUILD_TAG" \
   --debug
 
 python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
diff --git a/scripts/system-tests/template_lava_job.jinja2 b/scripts/system-tests/template_lava_job.jinja2
index 967dc82..878da10 100644
--- a/scripts/system-tests/template_lava_job.jinja2
+++ b/scripts/system-tests/template_lava_job.jinja2
@@ -91,44 +91,64 @@ actions:
         from: git
         path: lava/system-tests/failing-close.yml
         name: failing-close
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/failing-ioctl.yml
         name: failing-ioctl
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/failing-open-efault.yml
         name: failing-open-efault
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/success-dup-close.yml
         name: success-dup-close
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/raw-syscall-getpid.yml
         name: raw-syscall-getpid
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/failing-open-enoent.yml
         name: failing-open-enoent
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/lttng-test-filter.yml
         name: lttng-test-filter
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
 {% elif test_type == TestType.baremetal_tests %}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/perf-tests.yml
         name: perf-tests
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
 {% elif test_type == TestType.kvm_tests %}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/kernel-tests.yml
         name: kernel-tests
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
       - repository: https://github.com/lttng/lttng-ci.git
         from: git
         path: lava/system-tests/destructive-tests.yml
         name: destructive-tests
+        params:
+          JENKINS_BUILD_ID: {{ jenkins_build_id }}
 {% elif test_type == TestType.kvm_fuzzing_tests %}
       - repository: https://github.com/lttng/lttng-ci.git
        from: git
-- 
2.34.1