Lava: Save benchmark result plots in image gallery
author     Francis Deslauriers <francis.deslauriers@efficios.com>
           Mon, 12 Dec 2016 22:24:34 +0000 (17:24 -0500)
committer  Francis Deslauriers <francis.deslauriers@efficios.com>
           Wed, 18 Jan 2017 19:44:17 +0000 (14:44 -0500)
Signed-off-by: Francis Deslauriers <francis.deslauriers@efficios.com>
jobs/lttng-baremetal-tests.yaml
scripts/lttng-baremetal-tests/lava-submit.py

jobs/lttng-baremetal-tests.yaml
index 8583cdc9238939fa4fc5bcb2e9ec8bdf2e89ae8d..7f3ab057719626335e7bc9996af0bb9617e6f5d6 100644
             UST_BRANCH={lttngversion}
             BUILD_DEVICE=baremetal
     publishers:
+      - archive:
+          artifacts: '*.png,*.csv'
+          stable: true
+          do-not-fingerprint: true
       - email:
           recipients: 'francis.deslauriers@efficios.com'
+      - image-gallery:
+        - gallery-type: archived-images-gallery
+          title: Results
+          includes: '*.png'
     scm:
       - git:
           url: git://git-mirror.internal.efficios.com/lttng/lttng-tools.git
@@ -70,6 +78,7 @@
       - shell: !include-raw-escape: scripts/lttng-baremetal-tests/run-baremetal-benchmarks.sh
 
 
+
 - defaults:
     name: kvm_tests
     description: |
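Note: the archive publisher above collects any *.png and *.csv files left in the job workspace, and the image-gallery publisher then renders the archived PNGs on the build page under the "Results" title. The plots themselves are produced by the benchmark job scripts, not by this change; purely as an illustration, here is a minimal, hypothetical sketch of how one of the fetched CSV files could be turned into a PNG that these publishers would pick up (the script name and CSV layout are assumptions, not taken from this commit):

    # plot_results.py -- hypothetical helper, not part of this commit
    import csv
    import matplotlib
    matplotlib.use('Agg')          # render off-screen on the build node
    import matplotlib.pyplot as plt

    def plot_csv(csv_path, png_path):
        # Assumption: the CSV has a header row followed by numeric rows.
        with open(csv_path) as f:
            reader = csv.reader(f)
            header = next(reader)
            rows = [[float(v) for v in row] for row in reader if row]

        plt.figure()
        for i, name in enumerate(header):
            plt.plot([row[i] for row in rows], label=name)
        plt.legend()
        plt.title(csv_path)
        # The PNG lands in the workspace, where it matches the '*.png'
        # pattern of the archive and image-gallery publishers.
        plt.savefig(png_path)

    if __name__ == '__main__':
        plot_csv('processed_results_close.csv', 'processed_results_close.png')
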
scripts/lttng-baremetal-tests/lava-submit.py
index 7669b1ce6262b992bce955f4084058d57d54f570..6602e334405da83eaf95dc7ad4d9d37bf48ef16a 100644
@@ -66,6 +66,31 @@ def check_job_all_test_cases_state_count(server, job):
                     failed_tests+=1
     return (passed_tests, failed_tests)
 
+# Get the benchmark results from the LAVA bundle and
+# save them as CSV files locally.
+def fetch_benchmark_results(server, job):
+    content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+
+    # The result bundle is a large JSON containing the results of every testcase
+    # of the LAVA job as well as the files that were attached during the run.
+    # We need to iterate over this JSON to get the base64 representation of the
+    # benchmark results produced during the run.
+    for run in content['test_runs']:
+        # We only care about the benchmark test cases
+        if 'benchmark-syscall-' in run['test_id']:
+            if 'test_results' in run:
+                for res in run['test_results']:
+                    if 'attachments' in res:
+                        for a in res['attachments']:
+                            # Only save the expected result files
+                            if a['pathname'] in testcases:
+                                with open(a['pathname'], 'w') as f:
+                                    # Decode the base64 representation of the
+                                    # result file and write it to a file
+                                    # in the current working directory.
+                                    f.write(base64.b64decode(a['content']))
+
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
     content = get_job_bundle_content(server, job)
@@ -380,6 +405,8 @@ def main():
 
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
+    elif test_type is TestType.baremetal_benchmarks:
+        fetch_benchmark_results(server, jobid)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
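For reference, the nested loops in fetch_benchmark_results() follow the (assumed) shape of the decoded LAVA result bundle: a top-level 'test_runs' list, where each run carries a 'test_id', a list of 'test_results', and, on some results, a list of base64-encoded 'attachments'. A minimal sketch of that structure, with placeholder values (the test_id suffix and payload are illustrative, not taken from an actual bundle):

    # Hypothetical excerpt of a decoded result bundle, for illustration only.
    bundle = {
        'test_runs': [
            {
                # Only runs whose test_id contains 'benchmark-syscall-' are considered.
                'test_id': 'benchmark-syscall-close',
                'test_results': [
                    {
                        'attachments': [
                            {
                                # Written out as-is when it matches one of the
                                # expected CSV file names.
                                'pathname': 'processed_results_close.csv',
                                # base64-encoded CSV content (placeholder).
                                'content': '<base64-encoded CSV data>',
                            },
                        ],
                    },
                ],
            },
        ],
    }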