import json
import os
import random
+import re
import sys
import time
import xmlrpc.client
import argparse

import yaml
from jinja2 import Environment, FileSystemLoader
HOSTNAME = 'lava-master-02.internal.efficios.com'
OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
-class TestType():
+def parse_stable_version(stable_version_string):
+    # Extract the major and minor version numbers from the lttng version string.
+    version_match = re.search(r'stable-(\d+)\.(\d+)', stable_version_string)
+
+    if version_match is not None:
+        major_version = int(version_match.group(1))
+        minor_version = int(version_match.group(2))
+    else:
+        # Fall back to zero so that version comparisons in callers treat an
+        # unparseable version string as the oldest possible release.
+        major_version = 0
+        minor_version = 0
+    return major_version, minor_version
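+# For example (illustrative): parse_stable_version('stable-2.13') returns
+# (2, 13), while a string that does not match, such as 'master', yields (0, 0).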
+
+
+class TestType:
    """ Enum-like for test types """
-    baremetal_benchmarks = 1
-    baremetal_tests = 2
-    kvm_tests = 3
-    kvm_fuzzing_tests = 4
+
+    baremetal_tests = 1
+    kvm_tests = 2
    values = {
-        'baremetal-benchmarks' : baremetal_benchmarks,
-        'baremetal-tests' : baremetal_tests,
-        'kvm-tests' : kvm_tests,
-        'kvm-fuzzing-tests' : kvm_fuzzing_tests,
+        'baremetal-tests': baremetal_tests,
+        'kvm-tests': kvm_tests,
    }
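+# main() resolves the --type CLI argument through this mapping, e.g.
+# TestType.values['kvm-tests'] == TestType.kvm_tests.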
-class DeviceType():
+
+class DeviceType:
    """ Enum-like for device types """
+
    x86 = 'x86'
    kvm = 'qemu'
-    values = {
-        'kvm' : kvm,
-        'x86' : x86,
-    }
+    values = {'kvm': kvm, 'x86': x86}
+
def get_job_bundle_content(server, job):
    try:
        # The bundle lookup is assumed here: fetch it via the LAVA dashboard API.
        bundle = server.dashboard.get(server.scheduler.job_status(str(job))['bundle_sha1'])
    except xmlrpc.client.Fault as error:
        print('Error while fetching results bundle', error.faultString)
        raise
    return json.loads(bundle['content'])
+
def check_job_all_test_cases_state_count(server, job):
    """
    Parse the results bundle and count the passed and failed test cases.
    """
    print("Testcase result:")
    content = server.results.get_testjob_results_yaml(str(job))
-    testcases = yaml.load(content)
+    testcases = yaml.unsafe_load(content)
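+    # Each entry is assumed to carry at least the fields used below, roughly:
+    #   {'name': 'run-tests', 'result': 'pass', 'url': '/results/testcase/...'}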
    passed_tests = 0
    failed_tests = 0
    for testcase in testcases:
        if testcase['result'] != 'pass':
-            print("\tFAILED {}\n\t\t See http://{}{}".format(
-                testcase['name'],
-                HOSTNAME,
-                testcase['url']
-            ))
+            print(
+                "\tFAILED {}\n\t\t See http://{}{}".format(
+                    testcase['name'], HOSTNAME, testcase['url']
+                )
+            )
            failed_tests += 1
        else:
            passed_tests += 1
    return (passed_tests, failed_tests)
-def fetch_benchmark_results(build_id):
-    """
-    Get the benchmark results from the objstore
-    save them as CSV files localy
-    """
-    testcases = ['processed_results_close.csv',
-                 'processed_results_ioctl.csv',
-                 'processed_results_open_efault.csv',
-                 'processed_results_open_enoent.csv',
-                 'processed_results_dup_close.csv',
-                 'processed_results_raw_syscall_getpid.csv',
-                 'processed_results_lttng_test_filter.csv']
-    for testcase in testcases:
-        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
-        print('Fetching {}'.format(url))
-        urlretrieve(url, testcase)
def print_test_output(server, job):
    """
    Parse the job logs and print the test suite's console output.
    """
    job_finished, log = server.scheduler.jobs.logs(str(job))
-    logs = yaml.load(log.data.decode('ascii'))
+    logs = yaml.unsafe_load(log.data.decode('ascii'))
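+    # Each record is a dict; the fields used below are assumed to look roughly
+    # like {'lvl': 'target', 'dt': <timestamp>, 'msg': <console line>}.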
    print_line = False
    for line in logs:
        if line['lvl'] != 'target':
            continue
        # Assumed counterpart of the ENDTC signal below: start printing at STARTTC.
        if line['msg'] == '<LAVA_SIGNAL_STARTTC run-tests>':
            print('----- TEST SUITE OUTPUT BEGIN -----')
            print_line = True
            continue
        if line['msg'] == '<LAVA_SIGNAL_ENDTC run-tests>':
            print('----- TEST SUITE OUTPUT END -----')
-            break
+            print_line = False
+            continue
        if print_line:
            print("{} {}".format(line['dt'], line['msg']))
-def get_vlttng_cmd(lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None):
+
+def get_vlttng_cmd(
+    lttng_version, lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None
+):
    """
    Return the vlttng command to be used in the job template for setup.
    """
-    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
-        ' --override projects.babeltrace.build-env.PYTHON=python3' \
-        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
-        ' --profile babeltrace-stable-1.4' \
-        ' --profile babeltrace-python' \
-        ' --profile lttng-tools-master' \
-        ' --override projects.lttng-tools.source='+lttng_tools_url + \
-        ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
-        ' --profile lttng-tools-no-man-pages'
+    major_version, minor_version = parse_stable_version(lttng_version)
+
+    # Compare (major, minor) as a tuple so that e.g. 3.0 sorts after 2.11.
+    if lttng_version == 'master' or (major_version, minor_version) >= (2, 11):
+        urcu_profile = 'urcu-master'
+    else:
+        urcu_profile = 'urcu-stable-0.12'
+
+    # Starting with 2.14, babeltrace2 is the trace reader used for testing.
+    if lttng_version == 'master' or (major_version, minor_version) >= (2, 14):
+        babeltrace_profile = (
+            ' --profile babeltrace2-stable-2.0 --profile babeltrace2-python'
+        )
+        babeltrace_overrides = (
+            ' --override projects.babeltrace2.build-env.PYTHON=python3'
+            ' --override projects.babeltrace2.build-env.PYTHON_CONFIG=python3-config'
+            ' --override projects.babeltrace2.configure+=--disable-man-pages'
+        )
+    else:
+        babeltrace_profile = (
+            ' --profile babeltrace-stable-1.5 --profile babeltrace-python'
+        )
+        babeltrace_overrides = (
+            ' --override projects.babeltrace.build-env.PYTHON=python3'
+            ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config'
+        )
+
+    vlttng_cmd = (
+        'vlttng --jobs=$(nproc) --profile ' + urcu_profile
+        + babeltrace_profile
+        + babeltrace_overrides
+        + ' --profile lttng-tools-master'
+        ' --override projects.lttng-tools.source='
+        + lttng_tools_url
+        + ' --override projects.lttng-tools.checkout='
+        + lttng_tools_commit
+        + ' --profile lttng-tools-no-man-pages'
+    )
    if lttng_ust_commit is not None:
-        vlttng_cmd += ' --profile lttng-ust-master ' \
-            ' --override projects.lttng-ust.source='+lttng_ust_url + \
-            ' --override projects.lttng-ust.checkout='+lttng_ust_commit+ \
-            ' --profile lttng-ust-no-man-pages'
+        vlttng_cmd += (
+            ' --profile lttng-ust-master'
+            ' --override projects.lttng-ust.source='
+            + lttng_ust_url
+            + ' --override projects.lttng-ust.checkout='
+            + lttng_ust_commit
+            + ' --profile lttng-ust-no-man-pages'
+        )
+
+    if lttng_version == 'master' or (major_version, minor_version) >= (2, 11):
+        vlttng_cmd += (
+            ' --override projects.lttng-tools.configure+=--enable-test-sdt-uprobe'
+        )
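+    # For 'stable-2.12', for instance, the assembled command begins roughly as
+    # (illustrative only):
+    #   vlttng --jobs=$(nproc) --profile urcu-master
+    #       --profile babeltrace-stable-1.5 --profile babeltrace-python ...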
    vlttng_path = '/tmp/virtenv'
    return vlttng_cmd
+
def main():
+    send_retry_limit = 10
    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_xenial_2018-12-05.tar.gz"
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
+    parser.add_argument('-lv', '--lttng-version', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    try:
        lava_api_key = os.environ['LAVA2_JENKINS_TOKEN']
    except Exception as error:
-        print('LAVA2_JENKINS_TOKEN not found in the environment variable. Exiting...',
-              error)
+        print(
+            'LAVA2_JENKINS_TOKEN not found in the environment. Exiting...',
+            error,
+        )
        return -1
    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
-    jinja_env = Environment(loader=jinja_loader, trim_blocks=True,
-                            lstrip_blocks=True)
+    jinja_env = Environment(loader=jinja_loader, trim_blocks=True, lstrip_blocks=True)
    jinja_template = jinja_env.get_template('template_lava_job.jinja2')
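+    # trim_blocks/lstrip_blocks strip the whitespace that Jinja control tags
+    # would otherwise leave in the rendered LAVA job definition.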
    test_type = TestType.values[args.type]
-    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
+    if test_type == TestType.baremetal_tests:
        device_type = DeviceType.x86
    else:
        device_type = DeviceType.kvm
    vlttng_path = '/tmp/virtenv'
-    vlttng_cmd = get_vlttng_cmd(args.tools_url, args.tools_commit, args.ust_url, args.ust_commit)
+    vlttng_cmd = get_vlttng_cmd(
+        args.lttng_version, args.tools_url, args.tools_commit, args.ust_url, args.ust_commit
+    )
+
+    if args.lttng_version == "master":
+        lttng_version_string = "master"
+    elif args.lttng_version == "canary":
+        lttng_version_string = "2.10"
+    else:
+        major, minor = parse_stable_version(args.lttng_version)
+        lttng_version_string = "{}.{}".format(major, minor)
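+    # e.g. '--lttng-version stable-2.12' renders as '2.12' in the job template,
+    # while 'canary' is pinned to 2.10 above.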
+
    context = dict()
    context['DeviceType'] = DeviceType
    context['vlttng_cmd'] = vlttng_cmd
    context['vlttng_path'] = vlttng_path
+    context['lttng_version_string'] = lttng_version_string
    context['kernel_url'] = args.kernel
    context['nfsrootfs_url'] = nfsrootfs
    if args.debug:
        return 0
-    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
+    server = xmlrpc.client.ServerProxy(
+        'http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME)
+    )
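+    # LAVA authenticates XML-RPC calls through the user:token pair embedded in
+    # the URL, so the Jenkins token doubles as the password here.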
-    for attempt in range(10):
+    for attempt in range(1, send_retry_limit + 1):
        try:
            jobid = server.scheduler.submit_job(render)
        except xmlrpc.client.ProtocolError as error:
-            print('Protocol error on submit, sleeping and retrying. Attempt #{}'
-                  .format(attempt))
+            print(
+                'Protocol error on submit, sleeping and retrying. Attempt #{}'.format(
+                    attempt
+                )
+            )
            time.sleep(5)
            continue
        else:
            break
+    else:
+        # Reached only when every attempt failed (the loop never hit break), so
+        # a submission that succeeds on the last attempt is not misreported.
+        print(
+            'Protocol error on submit, maximum number of retries reached ({})'.format(
+                send_retry_limit
+            )
+        )
+        return -1
    print('Lava jobid:{}'.format(jobid))
-    print('Lava job URL: http://lava-master-02.internal.efficios.com/scheduler/job/{}'.format(jobid))
+    print(
+        'Lava job URL: http://{}/scheduler/job/{}'.format(HOSTNAME, jobid)
+    )
-    #Check the status of the job every 30 seconds
+    # Check the status of the job every 30 seconds.
    jobstatus = server.scheduler.job_state(jobid)['job_state']
    running = False
    while jobstatus in ['Submitted', 'Scheduling', 'Scheduled', 'Running']:
    if test_type == TestType.kvm_tests or test_type == TestType.baremetal_tests:
        print_test_output(server, jobid)
-    elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(args.build_id)
    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
    return 0
+
if __name__ == "__main__":
    sys.exit(main())