import json
import os
import random
+import re
import sys
import time
import xmlrpc.client
class TestType:
""" Enum like for test type """
- baremetal_benchmarks = 1
- baremetal_tests = 2
- kvm_tests = 3
+ baremetal_tests = 1
+ kvm_tests = 2
values = {
- 'baremetal-benchmarks': baremetal_benchmarks,
'baremetal-tests': baremetal_tests,
'kvm-tests': kvm_tests,
}
return (passed_tests, failed_tests)
-def fetch_benchmark_results(build_id):
- """
- Get the benchmark results from the objstore
- save them as CSV files localy
- """
- testcases = [
- 'processed_results_close.csv',
- 'processed_results_ioctl.csv',
- 'processed_results_open_efault.csv',
- 'processed_results_open_enoent.csv',
- 'processed_results_dup_close.csv',
- 'processed_results_raw_syscall_getpid.csv',
- 'processed_results_lttng_test_filter.csv',
- ]
- for testcase in testcases:
- url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
- print('Fetching {}'.format(url))
- urlretrieve(url, testcase)
-
-
def print_test_output(server, job):
"""
Parse the test case attachments to fetch the stdout of the test suite
def get_vlttng_cmd(
- lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None
+ lttng_version, lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None
):
"""
Return vlttng cmd to be used in the job template for setup.
+ ' --profile lttng-ust-no-man-pages'
)
+
+ # Get the major and minor version numbers from the lttng version string.
+ version_match = re.search(r'stable-(\d+)\.(\d+)', lttng_version)
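+ # e.g. 'stable-2.11' yields major 2 and minor 11; 'master' does not match.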
+
+ if version_match is not None:
+ major_version = int(version_match.group(1))
+ minor_version = int(version_match.group(2))
+ else:
+ # Default both to zero so the version check below evaluates to False for unrecognized versions.
+ major_version = 0
+ minor_version = 0
+
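+ # The SDT uprobe tests only apply to lttng-tools 2.11 and later (and master), so gate the extra configure switch on the parsed version.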
+ if lttng_version == 'master' or (major_version, minor_version) >= (2, 11):
+ vlttng_cmd += (
+ ' --override projects.lttng-tools.configure+=--enable-test-sdt-uprobe'
+ )
+
vlttng_path = '/tmp/virtenv'
vlttng_cmd += ' ' + vlttng_path
test_type = None
parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
parser.add_argument('-t', '--type', required=True)
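+ # Expected values: 'master' or 'stable-X.Y' (e.g. 'stable-2.11'); passed to get_vlttng_cmd().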
+ parser.add_argument('-lv', '--lttng-version', required=True)
parser.add_argument('-j', '--jobname', required=True)
parser.add_argument('-k', '--kernel', required=True)
parser.add_argument('-lm', '--lmodule', required=True)
test_type = TestType.values[args.type]
- if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
+ if test_type is TestType.baremetal_tests:
device_type = DeviceType.x86
else:
device_type = DeviceType.kvm
vlttng_path = '/tmp/virtenv'
vlttng_cmd = get_vlttng_cmd(
- args.tools_url, args.tools_commit, args.ust_url, args.ust_commit
+ args.lttng_version, args.tools_url, args.tools_commit, args.ust_url, args.ust_commit
)
context = dict()
if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
print_test_output(server, jobid)
- elif test_type is TestType.baremetal_benchmarks:
- fetch_benchmark_results(args.build_id)
passed, failed = check_job_all_test_cases_state_count(server, jobid)
print('With {} passed and {} failed Lava test cases.'.format(passed, failed))