Merge pull request #6 from frdeso/for_lttngci
author Michael Jeanson <mjeanson@gmail.com>
Wed, 18 Jan 2017 20:25:34 +0000 (15:25 -0500)
committer GitHub <noreply@github.com>
Wed, 18 Jan 2017 20:25:34 +0000 (15:25 -0500)
Save and plot baremetal benchmark results

jobs/lttng-baremetal-tests.yaml
scripts/lttng-baremetal-tests/lava-submit.py
scripts/lttng-baremetal-tests/run-baremetal-benchmarks.sh
scripts/lttng-baremetal-tests/run-baremetal-tests.sh
scripts/lttng-baremetal-tests/run-kvm-tests.sh

index 8583cdc9238939fa4fc5bcb2e9ec8bdf2e89ae8d..f82972823d9038cefbd40376805713f7d0f66415 100644 (file)
@@ -16,7 +16,7 @@
           categories:
             - 'baremetal-tests'
     project-type: freestyle
-    node: 'master'
+    node: 'x86-64'
     wrappers:
       - workspace-cleanup
       - timestamps
       - credentials-binding:
           - text:
               credential-id: jenkins_lava_key
-              variable: LAVA_FRDESO_TOKEN
+              variable: LAVA_JENKINS_TOKEN
       - inject:
           properties-content: |
             TOOLS_BRANCH={lttngversion}
             UST_BRANCH={lttngversion}
             BUILD_DEVICE=baremetal
     publishers:
+      - archive:
+          artifacts: '*.png,*.csv'
+          stable: true
+          do-not-fingerprint: true
       - email:
           recipients: 'francis.deslauriers@efficios.com'
+      - image-gallery:
+        - gallery-type: archived-images-gallery
+          title: Results
+          includes: '*.png'
     scm:
       - git:
           url: git://git-mirror.internal.efficios.com/lttng/lttng-tools.git
@@ -69,7 +77,6 @@
           properties-file: properties.txt
       - shell: !include-raw-escape: scripts/lttng-baremetal-tests/run-baremetal-benchmarks.sh
 
-
 - defaults:
     name: kvm_tests
     description: |
       - credentials-binding:
           - text:
               credential-id: jenkins_lava_key
-              variable: LAVA_FRDESO_TOKEN
+              variable: LAVA_JENKINS_TOKEN
       - inject:
           properties-content: |
             BUILD_DEVICE=kvm
     publishers:
       - email:
           recipients: 'francis.deslauriers@efficios.com'
+      - ircbot:
+          strategy: new-failure-and-fixed
+          channels:
+            - name: '#lttng'
+
     builders:
       - shell: !include-raw-escape: scripts/lttng-baremetal-tests/generate-properties-master.sh
       - trigger-builds:
       - credentials-binding:
           - text:
               credential-id: jenkins_lava_key
-              variable: LAVA_FRDESO_TOKEN
+              variable: LAVA_JENKINS_TOKEN
       - inject:
           properties-content: |
             UST_BRANCH={lttngversion}
     publishers:
       - email:
           recipients: 'francis.deslauriers@efficios.com'
+      - ircbot:
+          strategy: new-failure-and-fixed
+          channels:
+            - name: '#lttng'
     builders:
       - shell: !include-raw-escape: scripts/lttng-baremetal-tests/generate-properties-master.sh
       - trigger-builds:
             - "master"
           fastpoll: true
           basedir: src/linux
-    wrappers:
-      - workspace-cleanup
-      - timestamps
-      - ansicolor
-      - credentials-binding:
-          - text:
-              credential-id: jenkins_lava_key
-              variable: LAVA_FRDESO_TOKEN
-      - inject:
-          properties-content: |
-            BUILD_DEVICE=baremetal
-    builders:
-      - shell: !include-raw-escape: scripts/lttng-baremetal-tests/generate-properties-master.sh
-      - trigger-builds:
-        - project: "build_kernel_PARAM"
-          property-file: 'properties.txt'
-          block: true
-      - inject:
-          properties-file: properties.txt
-      - shell: !include-raw-escape: scripts/lttng-baremetal-tests/run-baremetal-benchmarks.sh
+
 - job-template:
     name: baremetal_benchmarks_kmainline_l{lttngversion}
     defaults: baremetal_benchmarks
             - "master"
           fastpoll: true
           basedir: src/linux
-    wrappers:
-      - workspace-cleanup
-      - timestamps
-      - ansicolor
-      - credentials-binding:
-          - text:
-              credential-id: jenkins_lava_key
-              variable: LAVA_FRDESO_TOKEN
-      - inject:
-          properties-content: |
-            BUILD_DEVICE=baremetal
-    builders:
-      - shell: !include-raw-escape: scripts/lttng-baremetal-tests/generate-properties-master.sh
-      - trigger-builds:
-        - project: "build_kernel_PARAM"
-          property-file: 'properties.txt'
-          block: true
-      - inject:
-          properties-file: properties.txt
-      - shell: !include-raw-escape: scripts/lttng-baremetal-tests/run-baremetal-benchmarks.sh
 
 - job:
     name: build_kernel_PARAM
     concurrent: true
 
     logrotate:
-      numToKeep: 20
+      numToKeep: 50
     node: 'x86-64'
 
     wrappers:
index 7669b1ce6262b992bce955f4084058d57d54f570..47f857cf98ad51d343e6191656fb23703556a1ba 100644 (file)
@@ -20,7 +20,7 @@ import json
 import os
 import sys
 import time
-import xmlrpclib
+import xmlrpc.client
 from collections import OrderedDict
 from enum import Enum
 
@@ -37,8 +37,8 @@ def get_job_bundle_content(server, job):
     try:
         bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
         bundle = server.dashboard.get(bundle_sha)
-    except Fault as f:
-        print 'Error while fetching results bundle', f
+    except xmlrpc.client.Fault as f:
+        print('Error while fetching results bundle', f.faultString)
 
     return json.loads(bundle['content'])
 
@@ -66,6 +66,31 @@ def check_job_all_test_cases_state_count(server, job):
                     failed_tests+=1
     return (passed_tests, failed_tests)
 
+# Get the benchmark results from the LAVA bundle and
+# save them as CSV files locally
+def fetch_benchmark_results(server, job):
+    content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+
+    # The result bundle is a large JSON containing the results of every testcase
+    # of the LAVA job as well as the files that were attached during the run.
+    # We need to iterate over this JSON to get the base64 representation of the
+    # benchmark results produced during the run.
+    for run in content['test_runs']:
+        # We only care about the benchmark test cases
+        if 'benchmark-syscall-' in run['test_id']:
+            if 'test_results' in run:
+                for res in run['test_results']:
+                    if 'attachments' in res:
+                        for a in res['attachments']:
+                            # We only save the expected result files
+                            if a['pathname'] in testcases:
+                                with open(a['pathname'],'wb') as f:
+                                    # Convert the b64 representation of the
+                                    # result file and write it to a file
+                                    # in the current working directory
+                                    f.write(base64.b64decode(a['content']))
+
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
     content = get_job_bundle_content(server, job)
@@ -78,7 +103,7 @@ def print_test_output(server, job):
 
                     # Decode the base64 file and split on newlines to iterate
                     # on list
-                    testoutput = base64.b64decode(attachment['content']).split('\n')
+                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8'))).split('\n')
 
                     # Create a generator to iterate on the lines and keeping
                     # the state of the iterator across the two loops.
@@ -125,7 +150,8 @@ def get_boot_cmd():
 def get_config_cmd(build_device):
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
             'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
+            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+            'libnuma-dev']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
@@ -313,7 +339,6 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
-    parser.add_argument('-l', '--lava-key', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     args = parser.parse_args()
@@ -328,6 +353,13 @@ def main():
         print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
         return -1
 
+    lava_api_key = None
+    try:
+        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
+    except Exception as e:
+        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
+        return -1
+
     if test_type is TestType.baremetal_benchmarks:
         j = create_new_job(args.jobname, build_device='x86')
         j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
@@ -364,7 +396,7 @@ def main():
     else:
         assert False, 'Unknown test type'
 
-    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
 
     jobid = server.scheduler.submit_job(json.dumps(j))
 
@@ -372,25 +404,30 @@ def main():
 
     #Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
+    not_running = False
     while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if not_running is False and jobstatus in 'Running':
+            print('Job started running')
+            not_running = True
         time.sleep(30)
         jobstatus = server.scheduler.job_status(jobid)['job_status']
 
-    passed, failed=check_job_all_test_cases_state_count(server, jobid)
-
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
+    elif test_type is TestType.baremetal_benchmarks:
+        fetch_benchmark_results(server, jobid)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
         return -1
     else:
+        passed, failed=check_job_all_test_cases_state_count(server, jobid)
         print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
 
-    if failed == 0:
-        return 0
-    else:
-        return -1
+        if failed == 0:
+            return 0
+        else:
+            return -1
 
 if __name__ == "__main__":
     sys.exit(main())
index 4e2a1ee8e6a5cd0659a7df4b8acb8c19a69c0821..187205aaadf5aaf4a7ba9700c3ede9868a0ae3bc 100644 (file)
@@ -18,11 +18,9 @@ echo 'At this point, we built the modules and kernel if we needed to.'
 echo 'We can now launch the lava job using those artefacts'
 git clone https://github.com/lttng/lttng-ci
 
-set +x
-python lttng-ci/scripts/lttng-baremetal-tests/lava-submit.py \
+python3 -u lttng-ci/scripts/lttng-baremetal-tests/lava-submit.py \
                           -t baremetal-benchmarks \
                           -j "$JOB_NAME" \
-                          -l "$LAVA_FRDESO_TOKEN" \
                           -k "$STORAGE_KERNEL_IMAGE" \
                           -km "$STORAGE_LINUX_MODULES" \
                           -lm "$STORAGE_LTTNG_MODULES" \
index abe5619aa6e1c73df64afd07abb652b2f8641e43..1ddac2c66c359fd4a9cf527ecd594229349142f5 100644 (file)
@@ -18,11 +18,9 @@ echo 'At this point, we built the modules and kernel if we needed to.'
 echo 'We can now launch the lava job using those artefacts'
 git clone https://github.com/lttng/lttng-ci
 
-set +x
-python lttng-ci/scripts/lttng-baremetal-tests/lava-submit.py \
+python3 -u lttng-ci/scripts/lttng-baremetal-tests/lava-submit.py \
                           -t baremetal-tests \
                           -j "$JOB_NAME" \
-                          -l "$LAVA_FRDESO_TOKEN" \
                           -k "$STORAGE_KERNEL_IMAGE" \
                           -km "$STORAGE_LINUX_MODULES" \
                           -lm "$STORAGE_LTTNG_MODULES" \
index 417a5e5745efc64ce07d9afc1497d6621454ebba..217c92d94b9027f67bb1839bd7f7810964352f72 100644 (file)
@@ -18,11 +18,9 @@ echo 'At this point, we built the modules and kernel if we needed to.'
 echo 'We can now launch the lava job using those artefacts'
 git clone https://github.com/lttng/lttng-ci
 
-set +x
-python lttng-ci/scripts/lttng-baremetal-tests/lava-submit.py \
+python3 -u lttng-ci/scripts/lttng-baremetal-tests/lava-submit.py \
                           -t kvm-tests \
                           -j "$JOB_NAME" \
-                          -l "$LAVA_FRDESO_TOKEN" \
                           -k "$STORAGE_KERNEL_IMAGE" \
                           -km "$STORAGE_LINUX_MODULES" \
                           -lm "$STORAGE_LTTNG_MODULES" \