From 104ed94bb5a45d6cfa1f9e5ef5591f04888beab8 Mon Sep 17 00:00:00 2001 From: Francis Deslauriers Date: Mon, 12 Dec 2016 17:24:34 -0500 Subject: [PATCH] Lava: Save benchmark result plots in image gallery Signed-off-by: Francis Deslauriers --- jobs/lttng-baremetal-tests.yaml | 9 +++++++ scripts/lttng-baremetal-tests/lava-submit.py | 27 ++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/jobs/lttng-baremetal-tests.yaml b/jobs/lttng-baremetal-tests.yaml index 8583cdc..7f3ab05 100644 --- a/jobs/lttng-baremetal-tests.yaml +++ b/jobs/lttng-baremetal-tests.yaml @@ -31,8 +31,16 @@ UST_BRANCH={lttngversion} BUILD_DEVICE=baremetal publishers: + - archive: + artifacts: '*.png,*.csv' + stable: true + do-not-fingerprint: true - email: recipients: 'francis.deslauriers@efficios.com' + - image-gallery: + - gallery-type: archived-images-gallery + title: Results + includes: '*.png' scm: - git: url: git://git-mirror.internal.efficios.com/lttng/lttng-tools.git @@ -70,6 +78,7 @@ - shell: !include-raw-escape: scripts/lttng-baremetal-tests/run-baremetal-benchmarks.sh + - defaults: name: kvm_tests description: | diff --git a/scripts/lttng-baremetal-tests/lava-submit.py b/scripts/lttng-baremetal-tests/lava-submit.py index 7669b1c..6602e33 100644 --- a/scripts/lttng-baremetal-tests/lava-submit.py +++ b/scripts/lttng-baremetal-tests/lava-submit.py @@ -66,6 +66,31 @@ def check_job_all_test_cases_state_count(server, job): failed_tests+=1 return (passed_tests, failed_tests) +# Get the benchmark results from the lava bundle +# save them as CSV files locally +def fetch_benchmark_results(server, job): + content = get_job_bundle_content(server, job) + testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv'] + + # The result bundle is a large JSON containing the results of every testcase + # of the LAVA job as well as the files that were attached during the run. 
+ # We need to iterate over this JSON to get the base64 representation of the + # benchmark results produced during the run. + for run in content['test_runs']: + # We only care about the benchmark testcases + if 'benchmark-syscall-' in run['test_id']: + if 'test_results' in run: + for res in run['test_results']: + if 'attachments' in res: + for a in res['attachments']: + # We only save the results file + if a['pathname'] in testcases: + with open(a['pathname'],'w') as f: + # Convert the b64 representation of the + # result file and write it to a file + # in the current working directory + f.write(base64.b64decode(a['content'])) + # Parse the attachment of the testcase to fetch the stdout of the test suite def print_test_output(server, job): content = get_job_bundle_content(server, job) @@ -380,6 +405,8 @@ def main(): if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests: print_test_output(server, jobid) + elif test_type is TestType.baremetal_benchmarks: + fetch_benchmark_results(server, jobid) print('Job ended with {} status.'.format(jobstatus)) if jobstatus not in 'Complete': -- 2.34.1