jjb: Build the Babeltrace Python bindings in Lava jobs
[lttng-ci.git] / scripts / lttng-baremetal-tests / lava-submit.py
index 6602e334405da83eaf95dc7ad4d9d37bf48ef16a..5a3ed94e02f50af4e846eb6f85b7685f9d2ca0d7 100644
@@ -20,7 +20,7 @@ import json
 import os
 import sys
 import time
-import xmlrpclib
+import xmlrpc.client
 from collections import OrderedDict
 from enum import Enum
 
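
For context: Python 3 renames Python 2's xmlrpclib module to xmlrpc.client, with ServerProxy and Fault carried over under the new name. A minimal sketch of the client pattern this script relies on (hostname, credentials and job id below are placeholders, not values from this repository):

    import xmlrpc.client

    # Placeholder URL; the real script builds it from USERNAME, the LAVA
    # API token and HOSTNAME.
    server = xmlrpc.client.ServerProxy('http://user:token@lava.example.com/RPC2')

    try:
        # scheduler.job_status is the LAVA XML-RPC call used throughout
        status = server.scheduler.job_status('1234')['job_status']
    except xmlrpc.client.Fault as f:
        # Fault carries the server-side error code and message
        print('RPC call failed:', f.faultCode, f.faultString)
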
@@ -37,8 +37,8 @@ def get_job_bundle_content(server, job):
     try:
         bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
         bundle = server.dashboard.get(bundle_sha)
-    except Fault as f:
-        print 'Error while fetching results bundle', f
+    except xmlrpc.client.Fault as f:
+        print('Error while fetching results bundle', f.faultString)
 
     return json.loads(bundle['content'])
 
@@ -47,6 +47,12 @@ def get_job_bundle_content(server, job):
 def check_job_all_test_cases_state_count(server, job):
     content = get_job_bundle_content(server, job)
 
+    # FIXME: These tests are part of the boot actions and fail randomly, but
+    # this doesn't affect the behaviour of the tests. We should update our
+    # Lava installation and try to reproduce it. This error was encountered
+    # on Ubuntu 16.04.
+    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']
+
     passed_tests=0
     failed_tests=0
     for run in content['test_runs']:
@@ -54,13 +60,7 @@ def check_job_all_test_cases_state_count(server, job):
             if 'test_case_id' in result :
                 if result['result'] in 'pass':
                     passed_tests+=1
-                elif result['test_case_id'] in 'wait_for_test_image_prompt':
-                    # FIXME:This test is part of the boot action and fails
-                    # randomly but doesn't affect the behaviour of the tests.
-                    # No reply on the Lava IRC channel yet. We should update
-                    # our Lava installation and try to reproduce it. This error
-                    # was encountered ont the KVM trusty image only. Not seen
-                    # on Xenial at this point.
+                elif result['test_case_id'] in tests_known_to_fail:
                     pass
                 else:
                     failed_tests+=1
@@ -70,7 +70,13 @@ def check_job_all_test_cases_state_count(server, job):
 # save them as CSV files locally
 def fetch_benchmark_results(server, job):
     content = get_job_bundle_content(server, job)
-    testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+    testcases = ['processed_results_close.csv',
+                 'processed_results_ioctl.csv',
+                 'processed_results_open_efault.csv',
+                 'processed_results_open_enoent.csv',
+                 'processed_results_dup_close.csv',
+                 'processed_results_raw_syscall_getpid.csv',
+                 'processed_results_lttng_test_filter.csv']
 
     # The result bundle is a large JSON containing the results of every testcase
     # of the LAVA job as well as the files that were attached during the run.
@@ -78,14 +84,14 @@ def fetch_benchmark_results(server, job):
     # benchmark results produced during the run.
     for run in content['test_runs']:
         # We only care about the benchmark testcases
-        if 'benchmark-syscall-' in run['test_id']:
+        if 'benchmark-' in run['test_id']:
             if 'test_results' in run:
                 for res in run['test_results']:
                     if 'attachments' in res:
                         for a in res['attachments']:
                             # We only save the results file
                             if a['pathname'] in testcases:
-                                with open(a['pathname'],'w') as f:
+                                with open(a['pathname'],'wb') as f:
                                     # Convert the b64 representation of the
                                     # result file and write it to a file
                                     # in the current working directory
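
The loops above assume the LAVA v1 result-bundle layout implied by the field accesses: test_runs → test_results → attachments, where each attachment carries a 'pathname' and a base64-encoded 'content'. A standalone sketch of the same extraction (field names taken from the code above; the sample bundle is hypothetical):

    import base64
    import json

    # Hypothetical minimal bundle mirroring only the fields the script reads.
    bundle = json.loads('''{
      "test_runs": [{
        "test_id": "benchmark-close",
        "test_results": [{
          "attachments": [{
            "pathname": "processed_results_close.csv",
            "content": "aGVhZGVyCjEsMgo="
          }]
        }]
      }]
    }''')

    for run in bundle['test_runs']:
        if 'benchmark-' in run['test_id']:
            for res in run['test_results']:
                for att in res.get('attachments', []):
                    # b64decode returns bytes, hence the 'wb' open mode above
                    with open(att['pathname'], 'wb') as f:
                        f.write(base64.b64decode(att['content']))
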
@@ -103,7 +109,7 @@ def print_test_output(server, job):
 
                     # Decode the base64 file and split on newlines to iterate
                     # on list
-                    testoutput = base64.b64decode(attachment['content']).split('\n')
+                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')
 
                     # Create a generator to iterate on the lines and keeping
                     # the state of the iterator across the two loops.
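
The generator mentioned in the comment above is the standard trick for resuming iteration where a previous loop stopped: wrapping the list in iter() yields an iterator whose position is shared by every loop that consumes it. A minimal sketch of the pattern (marker strings are hypothetical):

    lines = ['setup noise', '--- start ---', 'real output', '--- end ---', 'teardown']

    it = iter(lines)  # one iterator, shared position

    for line in it:
        if line == '--- start ---':
            break  # first loop consumes everything up to the marker

    for line in it:  # second loop resumes right after the marker
        if line == '--- end ---':
            break
        print(line)   # prints only 'real output'
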
@@ -150,7 +156,8 @@ def get_boot_cmd():
 def get_config_cmd(build_device):
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
             'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
+            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+            'libnuma-dev', 'python3-dev']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
@@ -158,7 +165,8 @@ def get_config_cmd(build_device):
                 'cat /etc/resolv.conf',
                 'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                 'groupadd tracing'
-                ]
+                ],
+                'timeout': 300
             }
         })
     if build_device in 'x86':
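
For reference, each OrderedDict built by these helpers becomes one entry of the job's 'actions' array once main() serializes the whole job with json.dumps() before calling scheduler.submit_job(). A sketch of what the command above serializes to (command list trimmed to one entry):

    import json
    from collections import OrderedDict

    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': ['groupadd tracing'],
            'timeout': 300  # the timeout added by this change
        }
    })

    # Printed form mirrors the JSON fragment the scheduler receives
    print(json.dumps(command, indent=2))
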
@@ -185,15 +193,35 @@ def get_baremetal_benchmarks_cmd():
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-close.yml'
                 },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
+                },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                 },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
+                },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                 }
                 ],
             'timeout': 18000
@@ -306,7 +334,10 @@ def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
         })
 
     vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
-                    ' --profile babeltrace-stable-1.4 ' \
+                    ' --override projects.babeltrace.build-env.PYTHON=python3' \
+                    ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
+                    ' --profile babeltrace-stable-1.4' \
+                    ' --profile babeltrace-python' \
                     ' --profile lttng-tools-master' \
                     ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
                     ' --profile lttng-tools-no-man-pages'
@@ -338,7 +369,6 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
-    parser.add_argument('-l', '--lava-key', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     args = parser.parse_args()
@@ -353,6 +383,13 @@ def main():
         print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
         return -1
 
+    lava_api_key = None
+    try:
+        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
+    except KeyError as e:
+        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
+        return -1
+
     if test_type is TestType.baremetal_benchmarks:
         j = create_new_job(args.jobname, build_device='x86')
         j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
@@ -389,20 +426,23 @@ def main():
     else:
         assert False, 'Unknown test type'
 
-    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
 
     jobid = server.scheduler.submit_job(json.dumps(j))
 
     print('Lava jobid:{}'.format(jobid))
+    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))
 
     # Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
+    running = False
     while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if not running and jobstatus in 'Running':
+            print('Job started running')
+            running = True
         time.sleep(30)
         jobstatus = server.scheduler.job_status(jobid)['job_status']
 
-    passed, failed=check_job_all_test_cases_state_count(server, jobid)
-
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
     elif test_type is TestType.baremetal_benchmarks:
@@ -412,12 +452,13 @@ def main():
     if jobstatus not in 'Complete':
         return -1
     else:
+        passed, failed = check_job_all_test_cases_state_count(server, jobid)
         print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
 
-    if failed == 0:
-        return 0
-    else:
-        return -1
+        if failed == 0:
+            return 0
+        else:
+            return -1
 
 if __name__ == "__main__":
     sys.exit(main())