X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=scripts%2Fsystem-tests%2Flava2-submit.py;h=1dbea2cebe03540b2d2d9dd3a7e12abfb20fcd58;hb=0be008d3b60d8e29995cc72eb33dc43350ab5ab5;hp=9565f0c8003c088f5bac3ecdff0419ed55f31e0e;hpb=9356eef7b98f46b660b982b02d303fd4744f57ef;p=lttng-ci.git

diff --git a/scripts/system-tests/lava2-submit.py b/scripts/system-tests/lava2-submit.py
index 9565f0c..1dbea2c 100644
--- a/scripts/system-tests/lava2-submit.py
+++ b/scripts/system-tests/lava2-submit.py
@@ -31,15 +31,26 @@ USERNAME = 'lava-jenkins'
 HOSTNAME = 'lava-master-02.internal.efficios.com'
 OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
 
+def parse_stable_version(stable_version_string):
+    # Get the major and minor version numbers from the lttng version string.
+    version_match = re.search('stable-(\d).(\d\d)', stable_version_string)
+
+    if version_match is not None:
+        major_version = int(version_match.group(1))
+        minor_version = int(version_match.group(2))
+    else:
+        # Setting to zero to make the comparison below easier.
+        major_version = 0
+        minor_version = 0
+    return major_version, minor_version
+
 class TestType:
     """ Enum like for test type """
 
-    baremetal_benchmarks = 1
-    baremetal_tests = 2
-    kvm_tests = 3
+    baremetal_tests = 1
+    kvm_tests = 2
     values = {
-        'baremetal-benchmarks': baremetal_benchmarks,
         'baremetal-tests': baremetal_tests,
         'kvm-tests': kvm_tests,
     }
@@ -88,26 +99,6 @@ def check_job_all_test_cases_state_count(server, job):
     return (passed_tests, failed_tests)
 
 
-def fetch_benchmark_results(build_id):
-    """
-    Get the benchmark results from the objstore
-    save them as CSV files localy
-    """
-    testcases = [
-        'processed_results_close.csv',
-        'processed_results_ioctl.csv',
-        'processed_results_open_efault.csv',
-        'processed_results_open_enoent.csv',
-        'processed_results_dup_close.csv',
-        'processed_results_raw_syscall_getpid.csv',
-        'processed_results_lttng_test_filter.csv',
-    ]
-    for testcase in testcases:
-        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
-        print('Fetching {}'.format(url))
-        urlretrieve(url, testcase)
-
-
 def print_test_output(server, job):
     """
     Parse the attachment of the testcase to fetch the stdout of the test suite
@@ -137,13 +128,28 @@ def get_vlttng_cmd(
     Return vlttng cmd to be used in the job template for setup.
     """
 
+    major_version, minor_version = parse_stable_version(lttng_version)
+
+    urcu_profile = ""
+    if lttng_version == 'master' or (major_version >= 2 and minor_version >= 11):
+        urcu_profile = "urcu-master"
+    else:
+        urcu_profile = "urcu-stable-0.12"
+
+    # Starting with 2.14, babeltrace2 is the reader for testing.
+    if lttng_version == 'master' or (major_version >= 2 and minor_version >= 14):
+        babeltrace_profile = " --profile babeltrace2-stable-2.0 --profile babeltrace2-python"
+        babeltrace_overrides = " --override projects.babeltrace2.build-env.PYTHON=python3 --override projects.babeltrace2.build-env.PYTHON_CONFIG=python3-config -o projects.babeltrace2.configure+=--disable-man-pages"
+    else:
+        babeltrace_profile = " --profile babeltrace-stable-1.5 --profile babeltrace-python"
+        babeltrace_overrides = " --override projects.babeltrace.build-env.PYTHON=python3 --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config"
+
+
     vlttng_cmd = (
-        'vlttng --jobs=$(nproc) --profile urcu-master'
-        ' --override projects.babeltrace.build-env.PYTHON=python3'
-        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config'
-        ' --profile babeltrace-stable-1.4'
-        ' --profile babeltrace-python'
-        ' --profile lttng-tools-master'
+        'vlttng --jobs=$(nproc) --profile ' + urcu_profile
+        + babeltrace_profile
+        + babeltrace_overrides
+        + ' --profile lttng-tools-master'
         ' --override projects.lttng-tools.source=' + lttng_tools_url
         + ' --override projects.lttng-tools.checkout='
@@ -162,17 +168,6 @@
     )
 
-    # Get the major and minor version numbers from the lttng version string.
-    version_match = re.search('stable-(\d).(\d\d)', lttng_version)
-
-    if version_match is not None:
-        major_version = int(version_match.group(1))
-        minor_version = int(version_match.group(2))
-    else:
-        # Setting to zero to make the comparison below easier.
-        major_version = 0
-        minor_version = 0
-
     if lttng_version == 'master' or (major_version >= 2 and minor_version >= 11):
         vlttng_cmd += (
             ' --override projects.lttng-tools.configure+=--enable-test-sdt-uprobe'
@@ -186,7 +181,7 @@ def main():
-    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_xenial_2018-12-05.tar.gz"
+    send_retry_limit = 10
     test_type = None
     parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
     parser.add_argument('-t', '--type', required=True)
@@ -200,6 +195,12 @@
     parser.add_argument('-uu', '--ust-url', required=False)
     parser.add_argument('-uc', '--ust-commit', required=False)
     parser.add_argument('-d', '--debug', required=False, action='store_true')
+    parser.add_argument(
+        '-r', '--rootfs-url', required=False,
+        default="https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_xenial_2018-12-05.tar.gz"
+    )
+    parser.add_argument('--ci-repo', required=False, default='https://github.com/lttng/lttng-ci.git')
+    parser.add_argument('--ci-branch', required=False, default='master')
     args = parser.parse_args()
 
     if args.type not in TestType.values:
@@ -226,7 +227,7 @@
 
     test_type = TestType.values[args.type]
 
-    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
+    if test_type is TestType.baremetal_tests:
         device_type = DeviceType.x86
     else:
         device_type = DeviceType.kvm
@@ -237,6 +238,15 @@
         args.lttng_version, args.tools_url, args.tools_commit, args.ust_url, args.ust_commit
     )
 
+    if args.lttng_version == "master":
+        lttng_version_string = "master"
+    elif args.lttng_version == "canary":
+        lttng_version_string = "2.13"
+    else:
+        major, minor = parse_stable_version(args.lttng_version)
+        lttng_version_string = str(major) + "." + str(minor)
+
+
     context = dict()
     context['DeviceType'] = DeviceType
     context['TestType'] = TestType
@@ -248,14 +258,18 @@
     context['vlttng_cmd'] = vlttng_cmd
     context['vlttng_path'] = vlttng_path
+    context['lttng_version_string'] = lttng_version_string
 
     context['kernel_url'] = args.kernel
-    context['nfsrootfs_url'] = nfsrootfs
+    context['nfsrootfs_url'] = args.rootfs_url
     context['lttng_modules_url'] = args.lmodule
     context['jenkins_build_id'] = args.build_id
 
     context['kprobe_round_nb'] = 10
 
+    context['ci_repo'] = args.ci_repo
+    context['ci_branch'] = args.ci_branch
+
     render = jinja_template.render(context)
 
     print('Job to be submitted:')
@@ -269,7 +283,7 @@
         'http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME)
     )
 
-    for attempt in range(10):
+    for attempt in range(1, send_retry_limit + 1):
         try:
             jobid = server.scheduler.submit_job(render)
         except xmlrpc.client.ProtocolError as error:
@@ -282,6 +296,14 @@
             continue
         else:
             break
+    # Early exit when the maximum number of retry is reached.
+    if attempt == send_retry_limit:
+        print(
+            'Protocol error on submit, maximum number of retry reached ({})'.format(
+                attempt
+            )
+        )
+        return -1
 
     print('Lava jobid:{}'.format(jobid))
     print(
@@ -310,8 +332,6 @@
 
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
-    elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(args.build_id)
 
     passed, failed = check_job_all_test_cases_state_count(server, jobid)
     print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
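For reference, a minimal, self-contained sketch of how the new parse_stable_version() helper added by this patch is expected to behave, which is what drives the urcu and babeltrace profile selection in get_vlttng_cmd(). The sample version strings below are illustrative assumptions, not values taken from the CI configuration.

import re

def parse_stable_version(stable_version_string):
    # Mirrors the helper added in the patch; a raw string is used for the
    # pattern here, which does not change the matching behaviour.
    version_match = re.search(r'stable-(\d).(\d\d)', stable_version_string)
    if version_match is not None:
        return int(version_match.group(1)), int(version_match.group(2))
    # No 'stable-X.YY' part in the string: fall back to (0, 0) so the
    # major/minor comparisons in get_vlttng_cmd() stay simple.
    return 0, 0

# Illustrative inputs only (hypothetical branch names):
assert parse_stable_version('stable-2.12') == (2, 12)   # urcu-master, babeltrace 1.5 profiles
assert parse_stable_version('stable-2.14') == (2, 14)   # babeltrace2 profiles
assert parse_stable_version('master') == (0, 0)         # handled by the explicit 'master' checks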