SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
class TestType(Enum):
- benchmarks=1
- tests=2
+ baremetal_benchmarks=1
+ baremetal_tests=2
+ kvm_tests=3
def get_job_bundle_content(server, job):
- bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
- bundle = server.dashboard.get(bundle_sha)
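+    # NOTE: Fault is assumed to be imported from xmlrpclib
+    # (from xmlrpclib import Fault), the same module that provides
+    # the ServerProxy used further down.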
+ try:
+ bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
+ bundle = server.dashboard.get(bundle_sha)
+    except Fault as f:
+        print('Error while fetching results bundle: {}'.format(f))
+        # Re-raise: 'bundle' would be undefined below if we carried on.
+        raise
return json.loads(bundle['content'])
failed_tests=0
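+    # Each 'test_results' entry is expected to look roughly like
+    # {'test_case_id': '...', 'result': 'pass'|'fail', ...}; the exact
+    # fields depend on the LAVA version in use.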
for run in content['test_runs']:
for result in run['test_results']:
            if 'test_case_id' in result:
if result['result'] in 'pass':
passed_tests+=1
+                elif result['test_case_id'] in 'wait_for_test_image_prompt':
+                    # FIXME: This test is part of the boot action and fails
+                    # randomly, but doesn't affect the behaviour of the tests.
+                    # No reply on the LAVA IRC channel yet. We should update
+                    # our LAVA installation and try to reproduce it. This
+                    # error was encountered on the KVM trusty image only; it
+                    # has not been seen on Xenial at this point.
+                    pass
else:
failed_tests+=1
return (passed_tests, failed_tests)
+# Get the benchmark results from the LAVA bundle and save them
+# as CSV files locally.
+def fetch_benchmark_results(server, job):
+ content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv',
+                 'processed_results_open_enoent.csv',
+                 'processed_results_open_efault.csv']
+
+ # The result bundle is a large JSON containing the results of every testcase
+ # of the LAVA job as well as the files that were attached during the run.
+ # We need to iterate over this JSON to get the base64 representation of the
+ # benchmark results produced during the run.
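+    # Illustrative bundle shape (only the fields used below are shown;
+    # the actual layout depends on the LAVA version):
+    #   {"test_runs": [{"test_id": "benchmark-syscall-...",
+    #                   "test_results": [{"attachments": [
+    #                       {"pathname": "processed_results_close.csv",
+    #                        "content": "<base64 data>"}]}]}]}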
+ for run in content['test_runs']:
+        # We only care about the benchmark testcases
+ if 'benchmark-syscall-' in run['test_id']:
+ if 'test_results' in run:
+ for res in run['test_results']:
+ if 'attachments' in res:
+ for a in res['attachments']:
+                        # We only save the benchmark results files
+                        if a['pathname'] in testcases:
+                            with open(a['pathname'], 'w') as f:
+                                # Decode the base64 representation of the
+                                # result file and write it to a file in
+                                # the current working directory
+ f.write(base64.b64decode(a['content']))
+
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
content = get_job_bundle_content(server, job)
def get_config_cmd(build_device):
packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
- 'libelf-dev', 'libmount-dev', 'libxml2', 'python3-pandas', \
- 'python3-numpy']
+ 'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
command = OrderedDict({
'command': 'lava_command_run',
'parameters': {
'commands': [
- 'ifup eth0',
- 'route -n',
'cat /etc/resolv.conf',
'echo nameserver 172.18.0.12 > /etc/resolv.conf',
            'groupadd tracing',
            'depmod -a',
'locale-gen en_US.UTF-8',
'apt-get update',
+            'apt-get upgrade -y',
'apt-get install -y {}'.format(' '.join(packages))
])
return command
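+# Each get_*_cmd() helper below follows the same pattern: it builds an
+# OrderedDict describing a single LAVA action, which main() appends to the
+# job's 'actions' list before submission.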
-def get_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd():
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
- },
+ }
+ ],
+ 'timeout': 18000
+ }
+ })
+ return command
+
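+# Each 'testdef_repos' entry points lava_test_shell at a test definition
+# YAML file in the lttng-ci repository: the repository is expected to be
+# cloned at the given 'revision' and the named 'testdef' executed.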
+def get_baremetal_tests_cmd():
+ command = OrderedDict({
+ 'command': 'lava_test_shell',
+ 'parameters': {
+ 'testdef_repos': [
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
})
return command
-def get_tests_cmd():
+def get_kvm_tests_cmd():
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
'testdef': 'lava/baremetal-tests/kernel-tests.yml'
+ },
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/destructive-tests.yml'
}
],
'timeout': 18000
'parameters': {
'customize': {},
'kernel': None,
- 'rootfs': 'file:///var/lib/lava-server/default/media/images/trusty-grub.img.gz',
- 'target_type': 'ubuntu'
+ 'target_type': 'ubuntu',
+ 'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
+ 'login_prompt': 'kvm02 login:',
+ 'username': 'root'
}
})
'command': 'lava_command_run',
'parameters': {
'commands': [
+ 'pip3 install --upgrade pip',
+ 'hash -r',
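+            # 'hash -r' makes the shell forget cached command locations so
+            # the freshly upgraded pip is picked up.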
'git clone https://github.com/frdeso/syscall-bench-it.git bm',
'pip3 install vlttng',
],
}
})
- vlttng_cmd = 'vlttng --jobs=16 --profile urcu-master' \
+ vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
' --profile babeltrace-stable-1.4 ' \
' --profile lttng-tools-master' \
' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
parser.add_argument('-k', '--kernel', required=True)
parser.add_argument('-km', '--kmodule', required=True)
parser.add_argument('-lm', '--lmodule', required=True)
- parser.add_argument('-l', '--lava-key', required=True)
parser.add_argument('-tc', '--tools-commit', required=True)
parser.add_argument('-uc', '--ust-commit', required=False)
args = parser.parse_args()
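+    # Map the textual -t/--type value onto the TestType enum; anything
+    # else is rejected below.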
- if args.type in 'benchmarks':
- test_type = TestType.benchmarks
- elif args.type in 'tests':
- test_type = TestType.tests
+    if args.type == 'baremetal-benchmarks':
+        test_type = TestType.baremetal_benchmarks
+    elif args.type == 'baremetal-tests':
+        test_type = TestType.baremetal_tests
+    elif args.type == 'kvm-tests':
+        test_type = TestType.kvm_tests
else:
print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
return -1
- if test_type is TestType.benchmarks:
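+    # The LAVA API token now comes from the environment instead of the
+    # removed -l/--lava-key command line argument.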
+    lava_api_key = None
+    try:
+        lava_api_key = os.environ['LAVA_FRDESO_TOKEN']
+    except KeyError:
+        print('LAVA_FRDESO_TOKEN not found in the environment. Exiting...')
+        return -1
+
+ if test_type is TestType.baremetal_benchmarks:
+ j = create_new_job(args.jobname, build_device='x86')
+ j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+ elif test_type is TestType.baremetal_tests:
j = create_new_job(args.jobname, build_device='x86')
j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
- elif test_type is TestType.tests:
+ elif test_type is TestType.kvm_tests:
j = create_new_job(args.jobname, build_device='kvm')
j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
j['actions'].append(get_boot_cmd())
- if test_type is TestType.benchmarks:
+ if test_type is TestType.baremetal_benchmarks:
j['actions'].append(get_config_cmd('x86'))
j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
- j['actions'].append(get_benchmarks_cmd())
+ j['actions'].append(get_baremetal_benchmarks_cmd())
j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
- elif test_type is TestType.tests:
+ elif test_type is TestType.baremetal_tests:
+ if args.ust_commit is None:
+            print('Test runs need the -uc/--ust-commit option. Exiting...')
+ return -1
+ j['actions'].append(get_config_cmd('x86'))
+ j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
+ j['actions'].append(get_baremetal_tests_cmd())
+ j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
+ elif test_type is TestType.kvm_tests:
if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
return -1
j['actions'].append(get_config_cmd('kvm'))
j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
- j['actions'].append(get_tests_cmd())
+ j['actions'].append(get_kvm_tests_cmd())
j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
else:
assert False, 'Unknown test type'
- server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+ server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
jobid = server.scheduler.submit_job(json.dumps(j))
    # Check the status of the job every 30 seconds.
jobstatus = server.scheduler.job_status(jobid)['job_status']
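+    # LAVA job statuses polled here are assumed to be 'Submitted',
+    # 'Running', 'Complete' and 'Incomplete'.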
+    job_started = False
    while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if not job_started and jobstatus in 'Running':
+            print('Job started running')
+            job_started = True
time.sleep(30)
jobstatus = server.scheduler.job_status(jobid)['job_status']
- print('Job ended with {} status.'.format(jobstatus))
- if jobstatus not in 'Complete':
- return -1
-
    passed, failed = check_job_all_test_cases_state_count(server, jobid)
- print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
-
- if test_type is TestType.tests:
+ if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
print_test_output(server, jobid)
+ elif test_type is TestType.baremetal_benchmarks:
+ fetch_benchmark_results(server, jobid)
+
+ print('Job ended with {} status.'.format(jobstatus))
+ if jobstatus not in 'Complete':
+ return -1
+ else:
+ print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
if failed == 0:
return 0