X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=scripts%2Fsystem-tests%2Flava2-submit.py;h=099f6d4e23b1955f8aad9a541b216c9394039e54;hb=30989989e6bf18a4e67708f697417345a8c77d1e;hp=614a1a973b301748f6d89a9247f7435faad304a4;hpb=c2f8bcb90801a37d5e32a1465ebfe85d8d7f66a8;p=lttng-ci.git diff --git a/scripts/system-tests/lava2-submit.py b/scripts/system-tests/lava2-submit.py index 614a1a9..099f6d4 100644 --- a/scripts/system-tests/lava2-submit.py +++ b/scripts/system-tests/lava2-submit.py @@ -18,6 +18,7 @@ import argparse import json import os import random +import re import sys import time import xmlrpc.client @@ -30,19 +31,28 @@ USERNAME = 'lava-jenkins' HOSTNAME = 'lava-master-02.internal.efficios.com' OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/" +def parse_stable_version(stable_version_string): + # Get the major and minor version numbers from the lttng version string. + version_match = re.search('stable-(\d).(\d\d)', stable_version_string) + + if version_match is not None: + major_version = int(version_match.group(1)) + minor_version = int(version_match.group(2)) + else: + # Setting to zero to make the comparison below easier. + major_version = 0 + minor_version = 0 + return major_version, minor_version + class TestType: """ Enum like for test type """ - baremetal_benchmarks = 1 - baremetal_tests = 2 - kvm_tests = 3 - kvm_fuzzing_tests = 4 + baremetal_tests = 1 + kvm_tests = 2 values = { - 'baremetal-benchmarks': baremetal_benchmarks, 'baremetal-tests': baremetal_tests, 'kvm-tests': kvm_tests, - 'kvm-fuzzing-tests': kvm_fuzzing_tests, } @@ -89,26 +99,6 @@ def check_job_all_test_cases_state_count(server, job): return (passed_tests, failed_tests) -def fetch_benchmark_results(build_id): - """ - Get the benchmark results from the objstore - save them as CSV files localy - """ - testcases = [ - 'processed_results_close.csv', - 'processed_results_ioctl.csv', - 'processed_results_open_efault.csv', - 'processed_results_open_enoent.csv', - 'processed_results_dup_close.csv', - 'processed_results_raw_syscall_getpid.csv', - 'processed_results_lttng_test_filter.csv', - ] - for testcase in testcases: - url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase)) - print('Fetching {}'.format(url)) - urlretrieve(url, testcase) - - def print_test_output(server, job): """ Parse the attachment of the testcase to fetch the stdout of the test suite @@ -125,25 +115,39 @@ def print_test_output(server, job): continue if line['msg'] == '': print('----- TEST SUITE OUTPUT END -----') - break + print_line = False + continue if print_line: print("{} {}".format(line['dt'], line['msg'])) def get_vlttng_cmd( - lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None + lttng_version, lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None ): """ Return vlttng cmd to be used in the job template for setup. """ + major_version, minor_version = parse_stable_version(lttng_version) + + urcu_profile = "" + if lttng_version == 'master' or (major_version >= 2 and minor_version >= 11): + urcu_profile = "urcu-master" + else: + urcu_profile = "urcu-stable-0.12" + + # Starting with 2.14, babeltrace2 is the reader for testing. 
+ if lttng_version == 'master' or (major_version >= 2 and minor_version >= 14): + babeltrace_profile = " --profile babeltrace2-stable-2.0 --profile babeltrace2-python" + else: + babeltrace_profile = " --profile babeltrace-stable-1.5 --profile babeltrace-python" + vlttng_cmd = ( - 'vlttng --jobs=$(nproc) --profile urcu-master' - ' --override projects.babeltrace.build-env.PYTHON=python3' + 'vlttng --jobs=$(nproc) --profile ' + urcu_profile + + ' --override projects.babeltrace.build-env.PYTHON=python3' ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' - ' --profile babeltrace-stable-1.4' - ' --profile babeltrace-python' - ' --profile lttng-tools-master' + + babeltrace_profile + + ' --profile lttng-tools-master' ' --override projects.lttng-tools.source=' + lttng_tools_url + ' --override projects.lttng-tools.checkout=' @@ -161,6 +165,12 @@ def get_vlttng_cmd( + ' --profile lttng-ust-no-man-pages' ) + + if lttng_version == 'master' or (major_version >= 2 and minor_version >= 11): + vlttng_cmd += ( + ' --override projects.lttng-tools.configure+=--enable-test-sdt-uprobe' + ) + vlttng_path = '/tmp/virtenv' vlttng_cmd += ' ' + vlttng_path @@ -169,10 +179,12 @@ def get_vlttng_cmd( def main(): + send_retry_limit = 10 nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_xenial_2018-12-05.tar.gz" test_type = None parser = argparse.ArgumentParser(description='Launch baremetal test using Lava') parser.add_argument('-t', '--type', required=True) + parser.add_argument('-lv', '--lttng-version', required=True) parser.add_argument('-j', '--jobname', required=True) parser.add_argument('-k', '--kernel', required=True) parser.add_argument('-lm', '--lmodule', required=True) @@ -208,7 +220,7 @@ def main(): test_type = TestType.values[args.type] - if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]: + if test_type is TestType.baremetal_tests: device_type = DeviceType.x86 else: device_type = DeviceType.kvm @@ -216,9 +228,18 @@ def main(): vlttng_path = '/tmp/virtenv' vlttng_cmd = get_vlttng_cmd( - args.tools_url, args.tools_commit, args.ust_url, args.ust_commit + args.lttng_version, args.tools_url, args.tools_commit, args.ust_url, args.ust_commit ) + if args.lttng_version == "master": + lttng_version_string = "master" + elif args.lttng_version == "canary": + lttng_version_string = "2.10" + else: + major, minor = parse_stable_version(args.lttng_version) + lttng_version_string = str(major) + "." + str(minor) + + context = dict() context['DeviceType'] = DeviceType context['TestType'] = TestType @@ -230,6 +251,7 @@ def main(): context['vlttng_cmd'] = vlttng_cmd context['vlttng_path'] = vlttng_path + context['lttng_version_string'] = lttng_version_string context['kernel_url'] = args.kernel context['nfsrootfs_url'] = nfsrootfs @@ -251,7 +273,7 @@ def main(): 'http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME) ) - for attempt in range(10): + for attempt in range(1, send_retry_limit + 1): try: jobid = server.scheduler.submit_job(render) except xmlrpc.client.ProtocolError as error: @@ -264,6 +286,14 @@ def main(): continue else: break + # Early exit when the maximum number of retry is reached. 
+    if attempt == send_retry_limit:
+        print(
+            'Protocol error on submit, maximum number of retries reached ({})'.format(
+                attempt
+            )
+        )
+        return -1
 
     print('Lava jobid:{}'.format(jobid))
     print(
@@ -292,8 +322,6 @@ def main():
 
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
-    elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(args.build_id)
 
     passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
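
A minimal standalone sketch of how the parse_stable_version() helper added above feeds the version gating. The escaped dot, the \d+ quantifiers and the sample inputs in the demo loop are my own illustrative choices, not taken from the patch (the patch uses the pattern 'stable-(\d).(\d\d)'); behaviour is the same for real "stable-2.x" strings.

    import re

    def parse_stable_version(stable_version_string):
        # Extract major/minor from strings such as "stable-2.12".
        version_match = re.search(r'stable-(\d+)\.(\d+)', stable_version_string)
        if version_match is not None:
            return int(version_match.group(1)), int(version_match.group(2))
        # Anything unparseable ("master", "canary", ...) falls back to 0.0 so
        # the ">= 2.x" comparisons simply evaluate to False.
        return 0, 0

    def urcu_profile(lttng_version):
        # Same shape of check the patch uses: a 2.11 threshold for the urcu
        # profile and --enable-test-sdt-uprobe, and a 2.14 threshold for the
        # babeltrace2 profiles.
        major, minor = parse_stable_version(lttng_version)
        if lttng_version == 'master' or (major >= 2 and minor >= 11):
            return 'urcu-master'
        return 'urcu-stable-0.12'

    if __name__ == '__main__':
        for version in ('master', 'stable-2.10', 'stable-2.12'):
            print(version, parse_stable_version(version), urcu_profile(version))

Running the demo prints master (0, 0) urcu-master, stable-2.10 (2, 10) urcu-stable-0.12 and stable-2.12 (2, 12) urcu-master.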
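
Similarly, the submission retry added at the end of main() can be read as a small reusable pattern around LAVA's XML-RPC scheduler.submit_job(): retry on xmlrpc.client.ProtocolError up to a fixed budget, then report failure. A sketch under that reading; the wrapper name, the retry_limit/delay defaults and the None return are illustrative choices of mine, not part of the patch, which keeps the loop inline and signals exhaustion with the attempt == send_retry_limit check instead.

    import time
    import xmlrpc.client

    def submit_with_retry(server, job_definition, retry_limit=10, delay=5):
        # Return the id of the scheduled LAVA job, or None if every attempt failed.
        for attempt in range(1, retry_limit + 1):
            try:
                # scheduler.submit_job() is the XML-RPC call the script already
                # uses; on success it returns the scheduled job's id.
                return server.scheduler.submit_job(job_definition)
            except xmlrpc.client.ProtocolError:
                print('Protocol error on submit, attempt #{} of {}'.format(attempt, retry_limit))
                time.sleep(delay)
        return None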