Lava: Get the LAVA API key from the environment rather than the CLI arguments
[lttng-ci.git] scripts/lttng-baremetal-tests/lava-submit.py
index 5b71da87484947d0a4dd8dfbd363bfa1d2cd5ec4..2115718acb2cafbbff91165a3eabadeb100e4b65 100644
@@ -29,12 +29,16 @@ HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
 
 class TestType(Enum):
-    benchmarks=1
-    tests=2
+    baremetal_benchmarks=1
+    baremetal_tests=2
+    kvm_tests=3
 
 def get_job_bundle_content(server, job):
-    bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
-    bundle = server.dashboard.get(bundle_sha)
+    try:
+        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
+        bundle = server.dashboard.get(bundle_sha)
+    except Fault as f:
+        print('Error while fetching results bundle: {}'.format(f))
+        raise
 
     return json.loads(bundle['content'])
 
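The new handler assumes xmlrpclib's Fault exception is available in the module namespace. The corresponding import is not visible in this hunk; it would presumably be something like:

    # Assumed import near the top of lava-submit.py (not shown in this diff):
    from xmlrpclib import Fault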
@@ -62,6 +66,31 @@ def check_job_all_test_cases_state_count(server, job):
                     failed_tests+=1
     return (passed_tests, failed_tests)
 
+# Get the benchmark results from the LAVA bundle and
+# save them as CSV files locally.
+def fetch_benchmark_results(server, job):
+    content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+
+    # The result bundle is a large JSON document containing the results of
+    # every testcase of the LAVA job as well as the files that were attached
+    # during the run. We need to iterate over this JSON to get the base64
+    # representation of the benchmark results produced during the run.
+    for run in content['test_runs']:
+        # We only care about the benchmark testcases
+        if 'benchmark-syscall-' in run['test_id']:
+            if 'test_results' in run:
+                for res in run['test_results']:
+                    if 'attachments' in res:
+                        for a in res['attachments']:
+                            # We only save the result files
+                            if a['pathname'] in testcases:
+                                with open(a['pathname'], 'w') as f:
+                                    # Convert the b64 representation of the
+                                    # result file and write it to a file
+                                    # in the current working directory
+                                    f.write(base64.b64decode(a['content']))
+
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
     content = get_job_bundle_content(server, job)
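The traversal in fetch_benchmark_results() implies a bundle layout roughly like the following sketch. The field names match those used by the code; the concrete values are invented for illustration:

    # Hypothetical shape of the result bundle, inferred from the traversal above:
    content = {
        'test_runs': [{
            'test_id': 'benchmark-syscall-failing-close',
            'test_results': [{
                'attachments': [{
                    'pathname': 'processed_results_close.csv',
                    'content': 'cnVuLHRpbWVfbnMKMSw0Mgo=',  # base64-encoded CSV (example value)
                }],
            }],
        }],
    }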
@@ -121,7 +150,7 @@ def get_boot_cmd():
 def get_config_cmd(build_device):
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
             'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2']
+            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
@@ -146,7 +175,7 @@ def get_config_cmd(build_device):
                 ])
     return command
 
-def get_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd():
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -165,7 +194,18 @@ def get_benchmarks_cmd():
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
-                },
+                }
+                ],
+            'timeout': 18000
+            }
+        })
+    return command
+
+def get_baremetal_tests_cmd():
+    command = OrderedDict({
+        'command': 'lava_test_shell',
+        'parameters': {
+            'testdef_repos': [
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
@@ -177,7 +217,7 @@ def get_benchmarks_cmd():
         })
     return command
 
-def get_tests_cmd():
+def get_kvm_tests_cmd():
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -186,6 +226,11 @@ def get_tests_cmd():
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/kernel-tests.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                 }
                 ],
             'timeout': 18000
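Once serialized with json.dumps(), the command built here becomes a LAVA v1 action of the following shape (shown as the equivalent Python literal, derived directly from the OrderedDict above):

    {
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {'git-repo': 'https://github.com/lttng/lttng-ci.git',
                 'revision': 'master',
                 'testdef': 'lava/baremetal-tests/kernel-tests.yml'},
                {'git-repo': 'https://github.com/lttng/lttng-ci.git',
                 'revision': 'master',
                 'testdef': 'lava/baremetal-tests/destructive-tests.yml'},
            ],
            'timeout': 18000,
        },
    }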
@@ -293,45 +338,64 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
-    parser.add_argument('-l', '--lava-key', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     args = parser.parse_args()
 
-    if args.type in 'benchmarks':
-        test_type = TestType.benchmarks
-    elif args.type in 'tests':
-        test_type = TestType.tests
+    if args.type == 'baremetal-benchmarks':
+        test_type = TestType.baremetal_benchmarks
+    elif args.type == 'baremetal-tests':
+        test_type = TestType.baremetal_tests
+    elif args.type == 'kvm-tests':
+        test_type = TestType.kvm_tests
     else:
         print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
         return -1
 
-    if test_type is TestType.benchmarks:
+    try:
+        lava_api_key = os.environ['LAVA_FRDESO_TOKEN']
+    except KeyError:
+        print('LAVA_FRDESO_TOKEN not found in the environment. Exiting...')
+        return -1
+
+    if test_type is TestType.baremetal_benchmarks:
         j = create_new_job(args.jobname, build_device='x86')
         j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
-    elif test_type  is TestType.tests:
+    elif test_type is TestType.baremetal_tests:
+        j = create_new_job(args.jobname, build_device='x86')
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+    elif test_type  is TestType.kvm_tests:
         j = create_new_job(args.jobname, build_device='kvm')
         j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
 
     j['actions'].append(get_boot_cmd())
 
-    if test_type is TestType.benchmarks:
+    if test_type is TestType.baremetal_benchmarks:
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
-        j['actions'].append(get_benchmarks_cmd())
+        j['actions'].append(get_baremetal_benchmarks_cmd())
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
-    elif test_type  is TestType.tests:
+    elif test_type is TestType.baremetal_tests:
+        if args.ust_commit is None:
+            print('Test runs need the -uc/--ust-commit option. Exiting...')
+            return -1
+        j['actions'].append(get_config_cmd('x86'))
+        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
+        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
+    elif test_type  is TestType.kvm_tests:
         if args.ust_commit is None:
             print('Tests runs need -uc/--ust-commit options. Exiting...')
             return -1
         j['actions'].append(get_config_cmd('kvm'))
         j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_tests_cmd())
+        j['actions'].append(get_kvm_tests_cmd())
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     else:
         assert False, 'Unknown test type'
 
-    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
 
     jobid = server.scheduler.submit_job(json.dumps(j))
 
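Stripped of the surrounding job construction, the new credential flow reduces to this pattern (a minimal sketch; the real USERNAME is the module-level constant defined at the top of the script, so the value below is a placeholder):

    import os
    import xmlrpclib

    USERNAME = 'lava-jenkins'  # hypothetical placeholder for the script's constant
    HOSTNAME = 'lava-master.internal.efficios.com'

    lava_api_key = os.environ.get('LAVA_FRDESO_TOKEN')
    if lava_api_key is None:
        raise SystemExit('LAVA_FRDESO_TOKEN not found in the environment')

    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))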
@@ -339,14 +403,20 @@ def main():
 
     #Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
+    job_started = False
     while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if not job_started and jobstatus == 'Running':
+            print('Job started running')
+            job_started = True
         time.sleep(30)
         jobstatus = server.scheduler.job_status(jobid)['job_status']
 
     passed, failed=check_job_all_test_cases_state_count(server, jobid)
 
-    if test_type is TestType.tests:
+    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
+    elif test_type is TestType.baremetal_benchmarks:
+        fetch_benchmark_results(server, jobid)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
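Note that `jobstatus in 'Submitted'` is a substring test rather than an equality test. An exact-match version of the polling loop would look like this sketch (using the `server` and `jobid` from the script, and assuming the v1 scheduler reports statuses such as 'Submitted', 'Running' and 'Complete'):

    import time

    jobstatus = server.scheduler.job_status(jobid)['job_status']
    while jobstatus in ('Submitted', 'Running'):
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']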