X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=scripts%2Flttng-baremetal-tests%2Flava-submit.py;h=47f857cf98ad51d343e6191656fb23703556a1ba;hb=d127abb86db8ec5cac84e4936b1169a29d94c9c8;hp=f9c7f0614be02e3d1dc5648645369d996c0f7faa;hpb=f3d4ee9f3283c362e1526705e554ff6bcb46e11c;p=lttng-ci.git

diff --git a/scripts/lttng-baremetal-tests/lava-submit.py b/scripts/lttng-baremetal-tests/lava-submit.py
index f9c7f06..47f857c 100644
--- a/scripts/lttng-baremetal-tests/lava-submit.py
+++ b/scripts/lttng-baremetal-tests/lava-submit.py
@@ -20,7 +20,7 @@ import json
 import os
 import sys
 import time
-import xmlrpclib
+import xmlrpc.client
 from collections import OrderedDict
 from enum import Enum
 
@@ -29,12 +29,16 @@ HOSTNAME = 'lava-master.internal.efficios.com'
 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
 
 class TestType(Enum):
-    benchmarks=1
-    tests=2
+    baremetal_benchmarks=1
+    baremetal_tests=2
+    kvm_tests=3
 
 def get_job_bundle_content(server, job):
-    bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
-    bundle = server.dashboard.get(bundle_sha)
+    try:
+        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
+        bundle = server.dashboard.get(bundle_sha)
+    except xmlrpc.client.Fault as f:
+        sys.exit('Error while fetching results bundle: {}'.format(f.faultString))
 
     return json.loads(bundle['content'])
 
@@ -47,13 +51,46 @@ def check_job_all_test_cases_state_count(server, job):
     failed_tests=0
     for run in content['test_runs']:
         for result in run['test_results']:
             if 'test_case_id' in result:
                 if result['result'] in 'pass':
                     passed_tests+=1
+                elif result['test_case_id'] in 'wait_for_test_image_prompt':
+                    # FIXME: This test is part of the boot action and fails
+                    # randomly but doesn't affect the behaviour of the tests.
+                    # No reply on the Lava IRC channel yet. We should update
+                    # our Lava installation and try to reproduce it. This error
+                    # was encountered on the KVM trusty image only. Not seen
+                    # on Xenial at this point.
+                    pass
                 else:
                     failed_tests+=1
     return (passed_tests, failed_tests)
 
+# Get the benchmark results from the Lava bundle and
+# save them as CSV files locally.
+def fetch_benchmark_results(server, job):
+    content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+
+    # The result bundle is a large JSON containing the results of every testcase
+    # of the LAVA job as well as the files that were attached during the run.
+    # We need to iterate over this JSON to get the base64 representation of the
+    # benchmark results produced during the run.
+    for run in content['test_runs']:
+        # We only care about the benchmark testcases
+        if 'benchmark-syscall-' in run['test_id']:
+            if 'test_results' in run:
+                for res in run['test_results']:
+                    if 'attachments' in res:
+                        for a in res['attachments']:
+                            # We only save the result files
+                            if a['pathname'] in testcases:
+                                with open(a['pathname'], 'wb') as f:
+                                    # Convert the b64 representation of the
+                                    # result file and write it to a file
+                                    # in the current working directory
+                                    f.write(base64.b64decode(a['content']))
+
 # Parse the attachment of the testcase to fetch the stdout of the test suite
 def print_test_output(server, job):
     content = get_job_bundle_content(server, job)
@@ -66,7 +103,7 @@ def print_test_output(server, job):
 
             # Decode the base64 file and split on newlines to iterate
             # on list
-            testoutput = base64.b64decode(attachment['content']).split('\n')
+            testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')
 
             # Create a generator to iterate on the lines and keeping
             # the state of the iterator across the two loops.
@@ -113,14 +150,12 @@ def get_boot_cmd():
 def get_config_cmd(build_device):
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
               'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-              'libelf-dev', 'libmount-dev', 'libxml2', 'python3-pandas', \
-              'python3-numpy']
+              'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+              'libnuma-dev']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
             'commands': [
-                'ifup eth0',
-                'route -n',
                 'cat /etc/resolv.conf',
                 'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                 'groupadd tracing'
@@ -136,11 +171,12 @@ def get_config_cmd(build_device):
             'depmod -a',
             'locale-gen en_US.UTF-8',
             'apt-get update',
+            'apt-get upgrade -y',
             'apt-get install -y {}'.format(' '.join(packages))
         ])
     return command
 
-def get_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd():
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -159,7 +195,18 @@ def get_benchmarks_cmd():
                 'git-repo': 'https://github.com/lttng/lttng-ci.git',
                 'revision': 'master',
                 'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
-            },
+            }
+        ],
+        'timeout': 18000
+        }
+    })
+    return command
+
+def get_baremetal_tests_cmd():
+    command = OrderedDict({
+        'command': 'lava_test_shell',
+        'parameters': {
+            'testdef_repos': [
             {
                 'git-repo': 'https://github.com/lttng/lttng-ci.git',
                 'revision': 'master',
@@ -171,7 +218,7 @@ def get_benchmarks_cmd():
     })
     return command
 
-def get_tests_cmd():
+def get_kvm_tests_cmd():
     command = OrderedDict({
         'command': 'lava_test_shell',
         'parameters': {
@@ -180,6 +227,11 @@ def get_tests_cmd():
                 'git-repo': 'https://github.com/lttng/lttng-ci.git',
                 'revision': 'master',
                 'testdef': 'lava/baremetal-tests/kernel-tests.yml'
+            },
+            {
+                'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                'revision': 'master',
+                'testdef': 'lava/baremetal-tests/destructive-tests.yml'
             }
         ],
         'timeout': 18000
@@ -204,8 +256,10 @@ def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modul
         'parameters': {
             'customize': {},
             'kernel': None,
-            'rootfs': 'file:///var/lib/lava-server/default/media/images/trusty-grub.img.gz',
-            'target_type': 'ubuntu'
+            'target_type': 'ubuntu',
+            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
+            'login_prompt': 'kvm02 login:',
+            'username': 'root'
         }
     })
 
@@ -243,6 +297,8 @@ def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
         'command': 'lava_command_run',
         'parameters': {
             'commands': [
+                'pip3 install --upgrade pip',
+                'hash -r',
                 'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                 'pip3 install vlttng',
             ],
@@ -250,7 +306,7 @@ def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
         }
     })
 
-    vlttng_cmd = 'vlttng --jobs=16 --profile urcu-master' \
+    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
         ' --profile babeltrace-stable-1.4 ' \
        ' --profile lttng-tools-master' \
         ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
@@ -283,45 +339,64 @@ def main():
     parser.add_argument('-k', '--kernel', required=True)
     parser.add_argument('-km', '--kmodule', required=True)
     parser.add_argument('-lm', '--lmodule', required=True)
-    parser.add_argument('-l', '--lava-key', required=True)
     parser.add_argument('-tc', '--tools-commit', required=True)
     parser.add_argument('-uc', '--ust-commit', required=False)
     args = parser.parse_args()
 
-    if args.type in 'benchmarks':
-        test_type = TestType.benchmarks
-    elif args.type in 'tests':
-        test_type = TestType.tests
+    if args.type in 'baremetal-benchmarks':
+        test_type = TestType.baremetal_benchmarks
+    elif args.type in 'baremetal-tests':
+        test_type = TestType.baremetal_tests
+    elif args.type in 'kvm-tests':
+        test_type = TestType.kvm_tests
     else:
         print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
         return -1
 
-    if test_type is TestType.benchmarks:
+    lava_api_key = None
+    try:
+        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
+    except KeyError as e:
+        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
+        return -1
+
+    if test_type is TestType.baremetal_benchmarks:
+        j = create_new_job(args.jobname, build_device='x86')
+        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+    elif test_type is TestType.baremetal_tests:
         j = create_new_job(args.jobname, build_device='x86')
         j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
-    elif test_type is TestType.tests:
+    elif test_type is TestType.kvm_tests:
         j = create_new_job(args.jobname, build_device='kvm')
         j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
 
     j['actions'].append(get_boot_cmd())
 
-    if test_type is TestType.benchmarks:
+    if test_type is TestType.baremetal_benchmarks:
         j['actions'].append(get_config_cmd('x86'))
         j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
-        j['actions'].append(get_benchmarks_cmd())
+        j['actions'].append(get_baremetal_benchmarks_cmd())
         j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
-    elif test_type is TestType.tests:
+    elif test_type is TestType.baremetal_tests:
+        if args.ust_commit is None:
+            print('Test runs need the -uc/--ust-commit option. Exiting...')
+            return -1
+        j['actions'].append(get_config_cmd('x86'))
+        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
+        j['actions'].append(get_baremetal_tests_cmd())
+        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
+    elif test_type is TestType.kvm_tests:
         if args.ust_commit is None:
             print('Tests runs need -uc/--ust-commit options. Exiting...')
             return -1
         j['actions'].append(get_config_cmd('kvm'))
         j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
-        j['actions'].append(get_tests_cmd())
+        j['actions'].append(get_kvm_tests_cmd())
         j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
     else:
         assert False, 'Unknown test type'
 
-    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
 
     jobid = server.scheduler.submit_job(json.dumps(j))
 
@@ -329,25 +404,30 @@ def main():
     #Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
+    running = False
     while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if not running and jobstatus in 'Running':
+            print('Job started running')
+            running = True
         time.sleep(30)
         jobstatus = server.scheduler.job_status(jobid)['job_status']
 
+    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
+        print_test_output(server, jobid)
+    elif test_type is TestType.baremetal_benchmarks:
+        fetch_benchmark_results(server, jobid)
+
     print('Job ended with {} status.'.format(jobstatus))
+
     if jobstatus not in 'Complete':
         return -1
-
-    passed, failed=check_job_all_test_cases_state_count(server, jobid)
-
-    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
-
-    if test_type is TestType.tests:
-        print_test_output(server, jobid)
-
-    if failed == 0:
-        return 0
     else:
-        return -1
+        passed, failed = check_job_all_test_cases_state_count(server, jobid)
+        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
+
+        if failed == 0:
+            return 0
+        else:
+            return -1
 
 if __name__ == "__main__":
     sys.exit(main())
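
For readers unfamiliar with the LAVA 1.x result bundles that fetch_benchmark_results() walks above, the following minimal sketch shows the nesting it expects. The layout (test_runs / test_results / attachments with base64 'content') is taken from the code in this diff; the test_id value and the CSV payload are invented placeholders, not real job output.

import base64
import json

# Hypothetical, trimmed-down bundle: only the keys that
# fetch_benchmark_results() actually touches are shown.
bundle = {
    'content': json.dumps({
        'test_runs': [
            {
                'test_id': 'benchmark-syscall-failing-close',  # placeholder
                'test_results': [
                    {
                        'attachments': [
                            {
                                'pathname': 'processed_results_close.csv',
                                # base64 of the CSV produced during the run
                                'content': base64.b64encode(
                                    b'call,duration_ns\nclose,1234\n').decode('ascii'),
                            },
                        ],
                    },
                ],
            },
        ],
    })
}

# Same traversal as fetch_benchmark_results(), printing instead of writing.
content = json.loads(bundle['content'])
for run in content['test_runs']:
    if 'benchmark-syscall-' in run['test_id']:
        for res in run.get('test_results', []):
            for a in res.get('attachments', []):
                print(a['pathname'])
                print(base64.b64decode(a['content']).decode('ascii'))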
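Callers need updating too: the LAVA API token is now read from the LAVA_JENKINS_TOKEN environment variable instead of the removed -l/--lava-key argument, and the -t/--type values become baremetal-benchmarks, baremetal-tests and kvm-tests. Below is a hedged sketch of an invocation; the -j jobname flag spelling is not visible in this hunk and is an assumption, and the token, job name, paths and commit ids are placeholders.

import os
import subprocess

# Placeholder values; a real Jenkins job substitutes its own.
env = dict(os.environ, LAVA_JENKINS_TOKEN='<lava-api-token>')

subprocess.check_call(
    [
        'python3', 'scripts/lttng-baremetal-tests/lava-submit.py',
        '-t', 'kvm-tests',          # or 'baremetal-tests' / 'baremetal-benchmarks'
        '-j', 'my-lttng-kvm-job',   # assumed spelling of the jobname flag
        '-k', 'path/to/vmlinuz',
        '-km', 'path/to/linux-modules.tar.gz',
        '-lm', 'path/to/lttng-modules.tar.gz',
        '-tc', '<lttng-tools commit id>',
        '-uc', '<lttng-ust commit id>',
    ],
    env=env,
)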