X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=scripts%2Flttng-baremetal-tests%2Flava-submit.py;h=77abff970b13e0d1946ec47d7506821fc1959b44;hb=64d4186531e1d38372a06e35100a70ea9bb7a7c1;hp=5b71da87484947d0a4dd8dfbd363bfa1d2cd5ec4;hpb=46fb8afaf5187abf5de5d6a2aa659e09887ef063;p=lttng-ci.git

diff --git a/scripts/lttng-baremetal-tests/lava-submit.py b/scripts/lttng-baremetal-tests/lava-submit.py
index 5b71da8..77abff9 100644
--- a/scripts/lttng-baremetal-tests/lava-submit.py
+++ b/scripts/lttng-baremetal-tests/lava-submit.py
@@ -20,7 +20,7 @@ import json
 import os
 import sys
 import time
-import xmlrpclib
+import xmlrpc.client
 
 from collections import OrderedDict
 from enum import Enum
@@ -29,12 +29,16 @@ HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
 
 class TestType(Enum):
-    benchmarks=1
-    tests=2
+    baremetal_benchmarks=1
+    baremetal_tests=2
+    kvm_tests=3
 
 def get_job_bundle_content(server, job):
-    bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
-    bundle = server.dashboard.get(bundle_sha)
+    try:
+        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
+        bundle = server.dashboard.get(bundle_sha)
+    except xmlrpc.client.Fault as f:
+        sys.exit('Error while fetching results bundle: {}'.format(f.faultString))
 
     return json.loads(bundle['content'])
 
@@ -62,6 +66,31 @@ def check_job_all_test_cases_state_count(server, job):
                 failed_tests+=1
     return (passed_tests, failed_tests)
 
+# Get the benchmark results from the lava bundle and
+# save them as CSV files locally.
+def fetch_benchmark_results(server, job):
+    content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+
+    # The result bundle is a large JSON containing the results of every
+    # testcase of the LAVA job as well as the files that were attached
+    # during the run. We need to iterate over this JSON to get the base64
+    # representation of the benchmark results produced during the run.
+    for run in content['test_runs']:
+        # We only care about the benchmark testcases
+        if 'benchmark-syscall-' in run['test_id']:
+            if 'test_results' in run:
+                for res in run['test_results']:
+                    if 'attachments' in res:
+                        for a in res['attachments']:
+                            # We only save the result files
+                            if a['pathname'] in testcases:
+                                with open(a['pathname'], 'wb') as f:
+                                    # Decode the base64 representation of
+                                    # the result file and write it to a
+                                    # file in the current working directory
+                                    f.write(base64.b64decode(a['content']))
+
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
     content = get_job_bundle_content(server, job)
@@ -74,7 +103,7 @@ def print_test_output(server, job):
 
                 # Decode the base64 file and split on newlines to iterate
                 # on list
-                testoutput = base64.b64decode(attachment['content']).split('\n')
+                testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')
 
                 # Create a generator to iterate on the lines and keeping
                 # the state of the iterator across the two loops.
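The two hunks above walk the same result-bundle layout: test_runs, then test_results, then attachments, where each attachment carries a 'pathname' and a base64-encoded 'content' string. A minimal standalone sketch of that decoding step, against a made-up bundle dict rather than real LAVA output:

    import base64

    # Hypothetical stand-in for a LAVA result bundle.
    bundle = {
        'test_runs': [{
            'test_id': 'benchmark-syscall-failing-close',
            'test_results': [{
                'attachments': [{
                    'pathname': 'processed_results_close.csv',
                    # base64 of b'nr_loops,time\n1000,0.42\n'
                    'content': base64.b64encode(b'nr_loops,time\n1000,0.42\n').decode('ascii'),
                }],
            }],
        }],
    }

    for run in bundle['test_runs']:
        for res in run.get('test_results', []):
            for a in res.get('attachments', []):
                # b64decode() accepts an ASCII str; decode() then turns the
                # resulting bytes into text so that splitting on '\n' works
                # under Python 3 (bytes.split would need b'\n').
                text = base64.b64decode(a['content']).decode('utf-8')
                print(a['pathname'], '->', text.splitlines())

Decoding to bytes and then to text is also why the print_test_output() hunk drops the str(...) wrapper: calling str() on a bytes object returns its repr ("b'...'"), so splitting that on '\n' never matches the real newlines.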
@@ -121,7 +150,8 @@ def get_boot_cmd():
 def get_config_cmd(build_device):
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
             'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2']
+            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+            'libnuma-dev']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
@@ -129,7 +159,8 @@ def get_config_cmd(build_device):
             'cat /etc/resolv.conf',
             'echo nameserver 172.18.0.12 > /etc/resolv.conf',
             'groupadd tracing'
-            ]
+            ],
+            'timeout': 300
         }
     })
     if build_device in 'x86':
@@ -146,7 +177,7 @@ def get_config_cmd(build_device):
         ])
     return command
 
-def get_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd():
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -165,7 +196,18 @@ def get_benchmarks_cmd():
                 'git-repo': 'https://github.com/lttng/lttng-ci.git',
                 'revision': 'master',
                 'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
-                },
+                }
+            ],
+            'timeout': 18000
+        }
+    })
+    return command
+
+def get_baremetal_tests_cmd():
+    command = OrderedDict({
+        'command': 'lava_test_shell',
+        'parameters': {
+            'testdef_repos': [
                 {
                 'git-repo': 'https://github.com/lttng/lttng-ci.git',
                 'revision': 'master',
@@ -177,7 +219,7 @@ def get_benchmarks_cmd():
     })
     return command
 
-def get_tests_cmd():
+def get_kvm_tests_cmd():
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -186,6 +228,11 @@ def get_tests_cmd():
                 'git-repo': 'https://github.com/lttng/lttng-ci.git',
                 'revision': 'master',
                 'testdef': 'lava/baremetal-tests/kernel-tests.yml'
+                },
+                {
+                'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                'revision': 'master',
+                'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                 }
             ],
             'timeout': 18000
@@ -293,71 +340,96 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
-    parser.add_argument('-l', '--lava-key', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
 
     args = parser.parse_args()
 
-    if args.type in 'benchmarks':
-        test_type = TestType.benchmarks
-    elif args.type in 'tests':
-        test_type = TestType.tests
+    if args.type in 'baremetal-benchmarks':
+        test_type = TestType.baremetal_benchmarks
+    elif args.type in 'baremetal-tests':
+        test_type = TestType.baremetal_tests
+    elif args.type in 'kvm-tests':
+        test_type = TestType.kvm_tests
     else:
         print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
         return -1
 
-    if test_type is TestType.benchmarks:
+    lava_api_key = None
+    try:
+        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
+    except KeyError as e:
+        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
+        return -1
+
+    if test_type is TestType.baremetal_benchmarks:
+        j = create_new_job(args.jobname, build_device='x86')
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+    elif test_type is TestType.baremetal_tests:
         j = create_new_job(args.jobname, build_device='x86')
         j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
-    elif test_type is TestType.tests:
+    elif test_type is TestType.kvm_tests:
         j = create_new_job(args.jobname, build_device='kvm')
         j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
         j['actions'].append(get_boot_cmd())
 
-    if test_type is TestType.benchmarks:
+    if test_type is TestType.baremetal_benchmarks:
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
-        j['actions'].append(get_benchmarks_cmd())
+        j['actions'].append(get_baremetal_benchmarks_cmd())
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
-    elif test_type is TestType.tests:
+    elif test_type is TestType.baremetal_tests:
+        if args.ust_commit is None:
+            print('Tests runs need -uc/--ust-commit options. Exiting...')
+            return -1
+        j['actions'].append(get_config_cmd('x86'))
+        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
+        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
+    elif test_type is TestType.kvm_tests:
         if args.ust_commit is None:
             print('Tests runs need -uc/--ust-commit options. Exiting...')
             return -1
         j['actions'].append(get_config_cmd('kvm'))
         j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_tests_cmd())
+        j['actions'].append(get_kvm_tests_cmd())
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     else:
         assert False, 'Unknown test type'
 
-    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
 
     jobid = server.scheduler.submit_job(json.dumps(j))
 
     print('Lava jobid:{}'.format(jobid))
+    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))
 
     #Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
+    job_running = False
     while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if job_running is False and jobstatus in 'Running':
+            print('Job started running')
+            job_running = True
         time.sleep(30)
         jobstatus = server.scheduler.job_status(jobid)['job_status']
 
-    passed, failed=check_job_all_test_cases_state_count(server, jobid)
-
-    if test_type is TestType.tests:
+    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
+    elif test_type is TestType.baremetal_benchmarks:
+        fetch_benchmark_results(server, jobid)
 
     print('Job ended with {} status.'.format(jobstatus))
     if jobstatus not in 'Complete':
         return -1
     else:
+        passed, failed = check_job_all_test_cases_state_count(server, jobid)
         print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
-    if failed == 0:
-        return 0
-    else:
-        return -1
+        if failed == 0:
+            return 0
+        else:
+            return -1
 
 if __name__ == "__main__":
     sys.exit(main())
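The main() hunk above replaces the -l/--lava-key command-line argument with the LAVA_JENKINS_TOKEN environment variable and embeds the token in the XML-RPC endpoint URL. A minimal sketch of that pattern in isolation, assuming placeholder username and hostname rather than the script's real values:

    import os
    import sys
    import xmlrpc.client

    USERNAME = 'example-user'      # placeholder
    HOSTNAME = 'lava.example.com'  # placeholder

    def connect():
        # Fail early, before building the job definition, if the token
        # is missing from the environment.
        token = os.environ.get('LAVA_JENKINS_TOKEN')
        if token is None:
            sys.exit('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
        # xmlrpc.client is the Python 3 successor of xmlrpclib; HTTP basic
        # credentials can be embedded directly in the endpoint URL.
        return xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, token, HOSTNAME))

    if __name__ == '__main__':
        server = connect()

ServerProxy does not open a connection when constructed, so a bad or missing token in the URL only surfaces on the first remote call, here scheduler.submit_job().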