LAVA: Upload results to obj.internal.efficios.com
author     Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
           Thu, 22 Nov 2018 23:56:52 +0000 (18:56 -0500)
committer  Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
           Fri, 23 Nov 2018 22:10:20 +0000 (17:10 -0500)
This is in preparation for the upgrade to LAVA 2, which does not support
the lava-test-case-attach feature.

Signed-off-by: Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
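
This commit replaces every lava-test-case-attach step with an upload to the
object store through a new helper script, lava/upload_artifact.sh. A minimal
sketch of the pattern the test definitions below now follow (the paths and the
JENKINS_BUILD_ID parameter are taken from the diffs; treat it as illustrative
rather than a verbatim excerpt):

    # Run step of a Lava-Test definition; "ci" is the lttng-ci checkout
    # declared in the definition's git-repos section.
    tar czf coredump.tar.gz coredump
    ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/coredump.tar.gz"
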
18 files changed:
lava/system-tests/destructive-tests.yml
lava/system-tests/failing-close.yml
lava/system-tests/failing-ioctl.yml
lava/system-tests/failing-open-efault.yml
lava/system-tests/failing-open-enoent.yml
lava/system-tests/kernel-tests.yml
lava/system-tests/lttng-test-filter.yml
lava/system-tests/perf-tests.yml
lava/system-tests/raw-syscall-getpid.yml
lava/system-tests/success-dup-close.yml
lava/upload_artifact.sh [new file with mode: 0755]
scripts/system-tests/lava-submit.py
scripts/system-tests/lava2-submit.py
scripts/system-tests/run-baremetal-benchmarks.sh
scripts/system-tests/run-baremetal-tests.sh
scripts/system-tests/run-kvm-fuzzing-tests.sh
scripts/system-tests/run-kvm-tests.sh
scripts/system-tests/template_lava_job.jinja2

diff --git a/lava/system-tests/destructive-tests.yml b/lava/system-tests/destructive-tests.yml
index 32150e0f87a247c3277b821025396c659f25f8cf..a5668eb1a9662f502851afa46c9f6f5a39fe7ef9 100644 (file)
@@ -3,22 +3,29 @@ metadata:
         name: lttng-destructive-tests
         description: "Run root destructive test suite"
 install:
+        deps:
+                - curl
+        git-repos:
+                - url: https://github.com/lttng/lttng-ci
+                  destination: ci
+                  branch: master
         steps:
                 - export TMPDIR="/tmp"
-                - cd
                 - systemctl stop systemd-timesyncd.service
                 - ulimit -c unlimited
                 - mkdir -p coredump
                 - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
+params:
+  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 run:
         steps:
                 - source /root/lttngvenv/activate
-                - cd /root/lttngvenv/src/lttng-tools
+                - pushd /root/lttngvenv/src/lttng-tools
                 - lava-test-case build-test-suite --shell "make"
                 - export LTTNG_ENABLE_DESTRUCTIVE_TESTS="will-break-my-system"
                 - cd tests
                 #Need to check if the file is present for branches where the testcase was not backported
                 - lava-test-case run-tests --shell "if [ -e root_destructive_tests ]; then prove --verbose --merge --exec '' - < root_destructive_tests; else echo 'root_destructive_tests not found'; fi"
-                - cd
+                - popd
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-tests coredump.tar.gz
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/coredump.tar.gz"
diff --git a/lava/system-tests/failing-close.yml b/lava/system-tests/failing-close.yml
index f326a9f0fad3ea84a19c340eda9c9973ccd7f8ed..627066978cc755f54079366a510cc0b6170b68d0 100644 (file)
@@ -3,12 +3,13 @@ metadata:
         name: benchmark-syscall-failing-close
         description: "Perform syscall tracing benchmark of failing close"
 params:
-    JENKINS_JOBNAME: "default jobname"
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 
 install:
         deps:
                 - python3-pandas
                 - python3-numpy
+                - curl
         git-repos:
                 - url: https://github.com/lttng/lttng-ci
                   destination: ci
@@ -23,15 +24,13 @@ run:
                 - source /root/lttngvenv/activate
                 - export BENCHMARK_DIR=$(mktemp --directory)/bm
                 - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-                - cd $BENCHMARK_DIR
+                - pushd $BENCHMARK_DIR
                 - lava-test-case build-benchmarks --shell "make"
                 - lava-test-case run-benchmarks --shell "./run.sh failing-close sys_close"
-                - lava-test-case-attach run-benchmarks "./results.csv"
-                - cd -
-                - cd ci
-                - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-                - mv ./processed_results.csv ../processed_results_close.csv
-                - cd -
+                - popd
+                - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+                - mv ./processed_results.csv ./processed_results_close.csv
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-benchmarks coredump.tar.gz
-                - lava-test-case-attach run-benchmarks "./processed_results_close.csv"
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+                - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_close.csv"
+                - ./ci/lava/upload_artifact.sh processed_results_close.csv "results/${JENKINS_BUILD_ID}/processed_results_close.csv"
diff --git a/lava/system-tests/failing-ioctl.yml b/lava/system-tests/failing-ioctl.yml
index 0ea287247c273848926eeddfd1efd95f28793c2b..695999d221b08dc97f400c3e66b7e12ca3abe643 100644 (file)
@@ -3,12 +3,13 @@ metadata:
         name: benchmark-syscall-failing-ioctl
         description: "Perform syscall tracing benchmark of failing ioctl"
 params:
-    JENKINS_JOBNAME: "default jobname"
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 
 install:
         deps:
                 - python3-pandas
                 - python3-numpy
+                - curl
         git-repos:
                 - url: https://github.com/lttng/lttng-ci
                   destination: ci
@@ -23,15 +24,13 @@ run:
                 - source /root/lttngvenv/activate
                 - export BENCHMARK_DIR=$(mktemp --directory)/bm
                 - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-                - cd $BENCHMARK_DIR
+                - pushd $BENCHMARK_DIR
                 - lava-test-case build-benchmarks --shell "make"
                 - lava-test-case run-benchmarks --shell "./run.sh failing-ioctl sys_ioctl"
-                - lava-test-case-attach run-benchmarks "./results.csv"
-                - cd -
-                - cd ci
-                - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-                - mv ./processed_results.csv ../processed_results_ioctl.csv
-                - cd -
+                - popd
+                - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+                - mv ./processed_results.csv ./processed_results_ioctl.csv
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-benchmarks coredump.tar.gz
-                - lava-test-case-attach run-benchmarks "./processed_results_ioctl.csv"
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+                - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_ioctl.csv"
+                - ./ci/lava/upload_artifact.sh processed_results_ioctl.csv "results/${JENKINS_BUILD_ID}/processed_results_ioctl.csv"
diff --git a/lava/system-tests/failing-open-efault.yml b/lava/system-tests/failing-open-efault.yml
index 790ae361b0888b0b9e919b139513865bf2bc7a70..2235dc702d130fd4a1c3d816bb2661a437c92aeb 100644 (file)
@@ -3,12 +3,13 @@ metadata:
         name: benchmark-syscall-failing-open-efault
         description: "Perform syscall tracing benchmark of failing open-efault"
 params:
-    JENKINS_JOBNAME: "default jobname"
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 
 install:
         deps:
                 - python3-pandas
                 - python3-numpy
+                - curl
         git-repos:
                 - url: https://github.com/lttng/lttng-ci
                   destination: ci
@@ -23,15 +24,13 @@ run:
                 - source /root/lttngvenv/activate
                 - export BENCHMARK_DIR=$(mktemp --directory)/bm
                 - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-                - cd $BENCHMARK_DIR
+                - pushd $BENCHMARK_DIR
                 - lava-test-case build-benchmarks --shell "make"
                 - lava-test-case run-benchmarks --shell "./run.sh failing-open-efault sys_open"
-                - lava-test-case-attach run-benchmarks "./results.csv"
-                - cd -
-                - cd ci
-                - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-                - mv ./processed_results.csv ../processed_results_open_efault.csv
-                - cd -
+                - popd
+                - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+                - mv ./processed_results.csv ./processed_results_open_efault.csv
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-benchmarks coredump.tar.gz
-                - lava-test-case-attach run-benchmarks "./processed_results_open_efault.csv"
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+                - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_open_efault.csv"
+                - ./ci/lava/upload_artifact.sh processed_results_open_efault.csv "results/${JENKINS_BUILD_ID}/processed_results_open_efault.csv"
diff --git a/lava/system-tests/failing-open-enoent.yml b/lava/system-tests/failing-open-enoent.yml
index 34c4daca4fdbbbad4410d4953e6fbcfe67e42c43..5eee24196601e9e6420d3fe75db7b9fd0c619d16 100644 (file)
@@ -3,12 +3,13 @@ metadata:
         name: benchmark-syscall-failing-open-enoent
         description: "Perform syscall tracing benchmark of failing open-enoent"
 params:
-    JENKINS_JOBNAME: "default jobname"
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 
 install:
         deps:
                 - python3-pandas
                 - python3-numpy
+                - curl
         git-repos:
                 - url: https://github.com/lttng/lttng-ci
                   destination: ci
@@ -23,15 +24,13 @@ run:
                 - source /root/lttngvenv/activate
                 - export BENCHMARK_DIR=$(mktemp --directory)/bm
                 - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-                - cd $BENCHMARK_DIR
+                - pushd $BENCHMARK_DIR
                 - lava-test-case build-benchmarks --shell "make"
                 - lava-test-case run-benchmarks --shell "./run.sh failing-open-enoent sys_open"
-                - lava-test-case-attach run-benchmarks "./results.csv"
-                - cd -
-                - cd ci
-                - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-                - mv ./processed_results.csv ../processed_results_open_enoent.csv
-                - cd -
+                - popd
+                - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+                - mv ./processed_results.csv ./processed_results_open_enoent.csv
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-benchmarks coredump.tar.gz
-                - lava-test-case-attach run-benchmarks "./processed_results_open_enoent.csv"
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+                - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_open_enoent.csv"
+                - ./ci/lava/upload_artifact.sh processed_results_open_enoent.csv "results/${JENKINS_BUILD_ID}/processed_results_open_enoent.csv"
diff --git a/lava/system-tests/kernel-tests.yml b/lava/system-tests/kernel-tests.yml
index a5db6231561c25fc7bf9f301b669acb6fa25ac1f..5c7c67c4bc8bf5243fd8273a6aa7f0f94c5714d0 100644 (file)
@@ -2,20 +2,28 @@ metadata:
         format: Lava-Test Test Definition 1.0
         name: lttng-kernel-test
         description: "Run kernel test suite"
+params:
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
+
 install:
+        deps:
+                - curl
+        git-repos:
+                - url: https://github.com/lttng/lttng-ci
+                  destination: ci
+                  branch: master
         steps:
                 - export TMPDIR="/tmp"
-                - cd
                 - ulimit -c unlimited
                 - mkdir -p coredump
                 - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
 run:
         steps:
                 - source /root/lttngvenv/activate
-                - cd /root/lttngvenv/src/lttng-tools
+                - pushd /root/lttngvenv/src/lttng-tools
                 - lava-test-case build-test-suite --shell "make"
                 - cd tests
                 - lava-test-case run-tests --shell "prove --verbose --merge --exec '' - < root_regression"
-                - cd
+                - popd
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-tests coredump.tar.gz
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
diff --git a/lava/system-tests/lttng-test-filter.yml b/lava/system-tests/lttng-test-filter.yml
index a8f288da38bb48155e208b093377c9c1db24b4ac..b952a2bdee1f68acf8b1c3360d440d17729344ce 100644 (file)
@@ -3,12 +3,13 @@ metadata:
         name: benchmark-lttng-test-filter
         description: "Perform syscall tracing benchmark of the lttng-test-filter"
 params:
-    JENKINS_JOBNAME: "default jobname"
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 
 install:
         deps:
                 - python3-pandas
                 - python3-numpy
+                - curl
         git-repos:
                 - url: https://github.com/lttng/lttng-ci
                   destination: ci
@@ -23,16 +24,14 @@ run:
                 - source /root/lttngvenv/activate
                 - export BENCHMARK_DIR=$(mktemp --directory)/bm
                 - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-                - cd $BENCHMARK_DIR
+                - pushd $BENCHMARK_DIR
                 - modprobe lttng-test
                 - lava-test-case build-benchmarks --shell "make"
                 - lava-test-case run-benchmarks --shell "./run.sh lttng-test-filter lttng_test_filter_event"
-                - lava-test-case-attach run-benchmarks "./results.csv"
-                - cd -
-                - cd ci
-                - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-                - mv ./processed_results.csv ../processed_results_lttng_test_filter.csv
-                - cd -
+                - popd
+                - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+                - mv ./processed_results.csv ./processed_results_lttng_test_filter.csv
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-benchmarks coredump.tar.gz
-                - lava-test-case-attach run-benchmarks "./processed_results_lttng_test_filter.csv"
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+                - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_lttng_test_filter.csv"
+                - ./ci/lava/upload_artifact.sh processed_results_lttng_test_filter.csv "results/${JENKINS_BUILD_ID}/processed_results_lttng_test_filter.csv"
diff --git a/lava/system-tests/perf-tests.yml b/lava/system-tests/perf-tests.yml
index c03892b9c49825178ce55e9316ce5623f4c6fee9..33110a709b8774a2ae7fade5d32e68fa11bd3463 100644 (file)
@@ -2,23 +2,29 @@ metadata:
         format: Lava-Test Test Definition 1.0
         name: lttng-perf-tests
         description: "Run perf regression test suite"
+params:
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 install:
         deps:
                 - libpfm4-dev
+                - curl
+        git-repos:
+                - url: https://github.com/lttng/lttng-ci
+                  destination: ci
+                  branch: master
         steps:
                 - export TMPDIR="/tmp"
-                - cd
                 - ulimit -c unlimited
                 - mkdir -p coredump
                 - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
 run:
         steps:
                 - source /root/lttngvenv/activate
-                - cd /root/lttngvenv/src/lttng-tools
+                - pushd /root/lttngvenv/src/lttng-tools
                 - lava-test-case build-test-suite --shell "make"
                 - cd tests
                 #Need to check if the file is present for branches where the testcase was not backported
                 - lava-test-case run-tests --shell "if [ -e perf_regression ]; then prove --verbose --merge --exec '' - < perf_regression; else echo 'perf_regression not found'; fi"
-                - cd
+                - popd
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-tests coredump.tar.gz
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
diff --git a/lava/system-tests/raw-syscall-getpid.yml b/lava/system-tests/raw-syscall-getpid.yml
index f4da5cd67a770537c19ef52505f4859647bb36a3..2342aa0e50ab7faa8fe1799af67ef3889831fd6e 100644 (file)
@@ -3,7 +3,7 @@ metadata:
         name: benchmark-raw-syscall-getpid
         description: "Perform syscall tracing benchmark of the raw syscall getpid"
 params:
-    JENKINS_JOBNAME: "default jobname"
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 
 install:
         deps:
@@ -23,15 +23,14 @@ run:
                 - source /root/lttngvenv/activate
                 - export BENCHMARK_DIR=$(mktemp --directory)/bm
                 - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-                - cd $BENCHMARK_DIR
+                - pushd $BENCHMARK_DIR
                 - lava-test-case build-benchmarks --shell "make"
                 - lava-test-case run-benchmarks --shell "./run.sh raw-syscall-getpid sys_getpid"
                 - lava-test-case-attach run-benchmarks "./results.csv"
-                - cd -
-                - cd ci
-                - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-                - mv ./processed_results.csv ../processed_results_raw_syscall_getpid.csv
-                - cd -
+                - popd
+                - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+                - mv ./processed_results.csv ./processed_results_raw_syscall_getpid.csv
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-benchmarks coredump.tar.gz
-                - lava-test-case-attach run-benchmarks "./processed_results_raw_syscall_getpid.csv"
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+                - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_raw_syscall_getpid.csv"
+                - ./ci/lava/upload_artifact.sh processed_results_raw_syscall_getpid.csv "results/${JENKINS_BUILD_ID}/processed_results_raw_syscall_getpid.csv"
diff --git a/lava/system-tests/success-dup-close.yml b/lava/system-tests/success-dup-close.yml
index 0443984f813fb058b321617b9af7496b2c5c2215..08ae82f2459260a7af5316870926c76c51dd8073 100644 (file)
@@ -3,12 +3,13 @@ metadata:
         name: benchmark-syscall-success-dup-close
         description: "Perform syscall tracing benchmark of successful dup and close"
 params:
-    JENKINS_JOBNAME: "default jobname"
+    JENKINS_BUILD_ID: "invalid_jenkins_build_id"
 
 install:
         deps:
                 - python3-pandas
                 - python3-numpy
+                - curl
         git-repos:
                 - url: https://github.com/lttng/lttng-ci
                   destination: ci
@@ -23,15 +24,14 @@ run:
                 - source /root/lttngvenv/activate
                 - export BENCHMARK_DIR=$(mktemp --directory)/bm
                 - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-                - cd $BENCHMARK_DIR
+                - pushd $BENCHMARK_DIR
                 - lava-test-case build-benchmarks --shell "make"
                 - lava-test-case run-benchmarks --shell "./run.sh success-dup-close sys_close,sys_dup"
                 - lava-test-case-attach run-benchmarks "./results.csv"
-                - cd -
-                - cd ci
-                - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-                - mv ./processed_results.csv ../processed_results_dup_close.csv
-                - cd -
+                - popd
+                - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+                - mv ./processed_results.csv ./processed_results_dup_close.csv
                 - tar czf coredump.tar.gz coredump
-                - lava-test-case-attach run-benchmarks coredump.tar.gz
-                - lava-test-case-attach run-benchmarks "./processed_results_dup_close.csv"
+                - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+                - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_dup_close.csv"
+                - ./ci/lava/upload_artifact.sh processed_results_dup_close.csv "results/${JENKINS_BUILD_ID}/processed_results_dup_close.csv"
diff --git a/lava/upload_artifact.sh b/lava/upload_artifact.sh
new file mode 100755 (executable)
index 0000000..333593b
--- /dev/null
+++ b/lava/upload_artifact.sh
@@ -0,0 +1,37 @@
+#!/bin/bash -eux
+# Copyright (C) 2018 - Jonathan Rajotte-Julien <jonathan.rajotte-julien@efficios.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+bucket=lava
+file=$1
+#Path must include the file name
+path=$2
+
+host=obj.internal.efficios.com
+s3_k='jenkins'
+s3_s='echo123456'
+
+resource="/${bucket}/${path}"
+content_type="application/octet-stream"
+date=`date -R`
+_signature="PUT\n\n${content_type}\n${date}\n${resource}"
+signature=`echo -en ${_signature} | openssl sha1 -hmac ${s3_s} -binary | base64`
+
+curl -v -X PUT -T "${file}" \
+          -H "Host: $host" \
+          -H "Date: ${date}" \
+          -H "Content-Type: ${content_type}" \
+          -H "Authorization: AWS ${s3_k}:${signature}" \
+          https://$host${resource}
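
A hedged usage sketch of the new script: the PUT lands at
https://obj.internal.efficios.com/lava/<path> with an AWS-v2-style signature
and, judging by the unauthenticated urlretrieve() call added to lava-submit.py
below, objects can be fetched back with a plain HTTPS GET ("my-build-id" is a
placeholder value):

    # Upload a local file under the "lava" bucket.
    ./lava/upload_artifact.sh results.csv "results/my-build-id/results_close.csv"

    # Fetch it back later for post-processing.
    curl -O "https://obj.internal.efficios.com/lava/results/my-build-id/results_close.csv"
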
diff --git a/scripts/system-tests/lava-submit.py b/scripts/system-tests/lava-submit.py
index cb1f7794dc533862a0934a0d1692201274169afa..3cf1131288056e1a27cc1334ac064fd5d55ddc31 100644 (file)
@@ -22,12 +22,15 @@ import random
 import sys
 import time
 import xmlrpc.client
+from urllib.parse import urljoin
+from urllib.request import urlretrieve
 from collections import OrderedDict
 from enum import Enum
 
 USERNAME = 'frdeso'
 HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
+OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
 
 class TestType(Enum):
     baremetal_benchmarks=1
@@ -71,8 +74,7 @@ def check_job_all_test_cases_state_count(server, job):
 
 # Get the benchmark results from the lava bundle
 # save them as CSV files localy
-def fetch_benchmark_results(server, job):
-    content = get_job_bundle_content(server, job)
+def fetch_benchmark_results(build_id):
     testcases = ['processed_results_close.csv',
             'processed_results_ioctl.csv',
             'processed_results_open_efault.csv',
@@ -80,25 +82,9 @@ def fetch_benchmark_results(server, job):
             'processed_results_dup_close.csv',
             'processed_results_raw_syscall_getpid.csv',
             'processed_results_lttng_test_filter.csv']
-
-    # The result bundle is a large JSON containing the results of every testcase
-    # of the LAVA job as well as the files that were attached during the run.
-    # We need to iterate over this JSON to get the base64 representation of the
-    # benchmark results produced during the run.
-    for run in content['test_runs']:
-        # We only care of the benchmark testcases
-        if 'benchmark-' in run['test_id']:
-            if 'test_results' in run:
-                for res in run['test_results']:
-                    if 'attachments' in res:
-                        for a in res['attachments']:
-                            # We only save the results file
-                            if a['pathname'] in testcases:
-                                with open(a['pathname'],'wb') as f:
-                                    # Convert the b64 representation of the
-                                    # result file and write it to a file
-                                    # in the current working directory
-                                    f.write(base64.b64decode(a['content']))
+    for testcase in testcases:
+        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
+        urlretrieve(url, testcase)
 
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
@@ -186,7 +172,7 @@ def get_config_cmd(build_device):
                 ])
     return command
 
-def get_baremetal_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -194,37 +180,44 @@ def get_baremetal_benchmarks_cmd():
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/failing-close.yml'
+                    'testdef': 'lava/system-tests/failing-close.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/failing-ioctl.yml'
+                    'testdef': 'lava/system-tests/failing-ioctl.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/failing-open-efault.yml'
+                    'testdef': 'lava/system-tests/failing-open-efault.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/success-dup-close.yml'
+                    'testdef': 'lava/system-tests/success-dup-close.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/raw-syscall-getpid.yml'
+                    'testdef': 'lava/system-tests/raw-syscall-getpid.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/failing-open-enoent.yml'
+                    'testdef': 'lava/system-tests/failing-open-enoent.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/lttng-test-filter.yml'
+                    'testdef': 'lava/system-tests/lttng-test-filter.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
                 ],
             'timeout': 7200
@@ -232,7 +225,7 @@ def get_baremetal_benchmarks_cmd():
         })
     return command
 
-def get_baremetal_tests_cmd():
+def get_baremetal_tests_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -240,7 +233,8 @@ def get_baremetal_tests_cmd():
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/perf-tests.yml'
+                    'testdef': 'lava/system-tests/perf-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
                 ],
             'timeout': 3600
@@ -248,7 +242,7 @@ def get_baremetal_tests_cmd():
         })
     return command
 
-def get_kvm_tests_cmd():
+def get_kvm_tests_cmd(build_id):
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -256,12 +250,14 @@ def get_kvm_tests_cmd():
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/kernel-tests.yml'
+                    'testdef': 'lava/system-tests/kernel-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
-                    'testdef': 'lava/system-tests/destructive-tests.yml'
+                    'testdef': 'lava/system-tests/destructive-tests.yml',
+                    'parameters': { 'JENKINS_BUILD_ID': build_id }
                 }
                 ],
             'timeout': 7200
@@ -404,6 +400,7 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
+    parser.add_argument('-id', '--build-id', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     args = parser.parse_args()
 
@@ -441,7 +438,7 @@ def main():
     if test_type is TestType.baremetal_benchmarks:
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
-        j['actions'].append(get_baremetal_benchmarks_cmd())
+        j['actions'].append(get_baremetal_benchmarks_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
     elif test_type is TestType.baremetal_tests:
         if args.ust_commit is None:
@@ -449,7 +446,7 @@ def main():
             return -1
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_baremetal_tests_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     elif test_type  is TestType.kvm_tests:
         if args.ust_commit is None:
@@ -457,7 +454,7 @@ def main():
             return -1
         j['actions'].append(get_config_cmd('kvm'))
         j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_kvm_tests_cmd())
+        j['actions'].append(get_kvm_tests_cmd(args.build_id))
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     elif test_type is TestType.kvm_fuzzing_tests:
         if args.ust_commit is None:
@@ -491,7 +488,7 @@ def main():
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
     elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(server, jobid)
+        fetch_benchmark_results(args.build_id)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
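
The Jenkins build identifier ties the two sides together: the run-* scripts
pass "$BUILD_TAG" through the new, required -id/--build-id option, the
submitters forward it to each test definition as JENKINS_BUILD_ID, and
fetch_benchmark_results() pulls the processed CSVs back from the matching
prefix. A hedged sketch of that round trip from the Jenkins side ("..." stands
for the arguments this commit leaves untouched):

    # Submit, tagging the LAVA job with the Jenkins build id.
    python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
        ... -tc "$LTTNG_TOOLS_COMMIT_ID" -id "$BUILD_TAG"

    # Equivalent of one fetch_benchmark_results() download, expressed as curl.
    curl -O "https://obj.internal.efficios.com/lava/results/${BUILD_TAG}/processed_results_close.csv"
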
diff --git a/scripts/system-tests/lava2-submit.py b/scripts/system-tests/lava2-submit.py
index 47cc61080416f31f386baf0269903218b963364f..2ad65123c957b776d750d0ed26e6866fc93a946f 100644 (file)
@@ -191,6 +191,7 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
+    parser.add_argument('-id', '--build-id', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     parser.add_argument('-d', '--debug', required=False, action='store_true')
     args = parser.parse_args()
@@ -246,6 +247,7 @@ def main():
     context['kernel_url'] = args.kernel
     context['nfsrootfs_url'] = nfsrootfs
     context['lttng_modules_url'] = args.lmodule
+    context['jenkins_build_id'] = args.build_id
 
     context['kprobe_round_nb'] = 10
 
diff --git a/scripts/system-tests/run-baremetal-benchmarks.sh b/scripts/system-tests/run-baremetal-benchmarks.sh
index 33378a44810e3d010e864320bf023ef31a7010df..437ea2ad449eb7378481f59696afc57795be53c2 100644 (file)
@@ -30,6 +30,7 @@ python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
                           -k "$S3_URL_KERNEL_IMAGE" \
                           -lm "$S3_URL_LTTNG_MODULES" \
                           -tc "$LTTNG_TOOLS_COMMIT_ID" \
+                          -id "$BUILD_TAG" \
                           --debug
 
 python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
diff --git a/scripts/system-tests/run-baremetal-tests.sh b/scripts/system-tests/run-baremetal-tests.sh
index b359c8e7a2766c9785401b3888660f83ae5e50b2..6d652e38b0117ae1748dcfedc70855267a8f9e95 100644 (file)
@@ -32,6 +32,7 @@ python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
                           -lm "$S3_URL_LTTNG_MODULES" \
                           -tc "$LTTNG_TOOLS_COMMIT_ID" \
                           -uc "$LTTNG_UST_COMMIT_ID" \
+                          -id "$BUILD_TAG" \
                           --debug
 
 python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
diff --git a/scripts/system-tests/run-kvm-fuzzing-tests.sh b/scripts/system-tests/run-kvm-fuzzing-tests.sh
index ae1cb36eafb54f05c97339e96d62ef75d59c87b8..be846af0701c3be9f3dca13a14c6efd66a23fdc3 100644 (file)
@@ -31,6 +31,7 @@ python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
                           -lm "$S3_URL_LTTNG_MODULES" \
                           -tc "$LTTNG_TOOLS_COMMIT_ID" \
                           -uc "$LTTNG_UST_COMMIT_ID" \
+                          -id "$BUILD_TAG" \
                           --debug
 
 python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
diff --git a/scripts/system-tests/run-kvm-tests.sh b/scripts/system-tests/run-kvm-tests.sh
index 7de803497158574c8ef289fdd6a5d26f9eab56d2..8b9722bbd2a978dc4ac8c3907d31302cc5c328ec 100644 (file)
@@ -31,6 +31,7 @@ python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
                           -lm "$S3_URL_LTTNG_MODULES" \
                           -tc "$LTTNG_TOOLS_COMMIT_ID" \
                           -uc "$LTTNG_UST_COMMIT_ID" \
+                          -id "$BUILD_TAG" \
                           --debug
 
 python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
diff --git a/scripts/system-tests/template_lava_job.jinja2 b/scripts/system-tests/template_lava_job.jinja2
index 967dc82294f82f3595d5ac9054c88afed563d6c1..878da10a381749ee237a20e8863ae7d19f41eef8 100644 (file)
@@ -91,44 +91,64 @@ actions:
               from: git
               path: lava/system-tests/failing-close.yml
               name: failing-close
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/failing-ioctl.yml
               name: failing-ioctl
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/failing-open-efault.yml
               name: failing-open-efault
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/success-dup-close.yml
               name: success-dup-close
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/raw-syscall-getpid.yml
               name: raw-syscall-getpid
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/failing-open-enoent.yml
               name: failing-open-enoent
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/lttng-test-filter.yml
               name: lttng-test-filter
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
           {% elif test_type == TestType.baremetal_tests %}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/perf-tests.yml
               name: perf-tests
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
           {% elif test_type == TestType.kvm_tests %}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/kernel-tests.yml
               name: kernel-tests
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git
               path: lava/system-tests/destructive-tests.yml
               name: destructive-tests
+              params:
+                JENKINS_BUILD_ID: {{ jenkins_build_id }}
           {% elif test_type == TestType.kvm_fuzzing_tests %}
             - repository: https://github.com/lttng/lttng-ci.git
               from: git