SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
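+# Bare-metal job types target the physical x86 board; kvm_tests run inside a QEMU/KVM guest.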
class TestType(Enum):
- benchmarks=1
- tests=2
+ baremetal_benchmarks=1
+ baremetal_tests=2
+ kvm_tests=3
def get_job_bundle_content(server, job):
- bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
- bundle = server.dashboard.get(bundle_sha)
+ try:
+ bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
+ bundle = server.dashboard.get(bundle_sha)
+ except Fault as f:
+ print('Error while fetching results bundle', f)
+ # Re-raise: 'bundle' would be unbound in the return below otherwise.
+ raise
return json.loads(bundle['content'])
failed_tests=0
for run in content['test_runs']:
for result in run['test_results']:
if 'test_case_id' in result:
if result['result'] in 'pass':
passed_tests+=1
+ elif result['test_case_id'] == 'wait_for_test_image_prompt':
+ # FIXME: This test is part of the boot action and fails
+ # randomly but doesn't affect the behaviour of the tests.
+ # No reply on the Lava IRC channel yet. We should update
+ # our Lava installation and try to reproduce it. This error
+ # was encountered on the KVM trusty image only. Not seen
+ # on Xenial at this point.
+ pass
else:
failed_tests+=1
return (passed_tests, failed_tests)
print('----- TEST SUITE OUTPUT END -----')
break
-def create_new_job(name):
+def create_new_job(name, build_device):
job = OrderedDict({
'health_check': False,
'job_name': name,
- 'device_type': 'x86',
- 'tags': [ 'dev-sda1' ],
+ 'device_type': build_device,
+ 'tags': [ ],
'timeout': 18000,
'actions': []
})
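+ # Only the physical x86 board carries the 'dev-sda1' device tag; its
+ # scratch disk is mounted on /tmp in get_config_cmd() below.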
+ if build_device == 'x86':
+ job['tags'].append('dev-sda1')
+
return job
def get_boot_cmd():
})
return command
-def get_config_cmd():
+def get_config_cmd(build_device):
packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
- 'libelf-dev', 'libmount-dev', 'libxml2', 'python3-pandas', \
- 'python3-numpy']
+ 'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
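+ # libpfm4-dev is pulled in presumably for the perf regression tests (perf-tests.yml) added below.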
command = OrderedDict({
'command': 'lava_command_run',
'parameters': {
'commands': [
- 'ifup eth0',
- 'route -n',
'cat /etc/resolv.conf',
'echo nameserver 172.18.0.12 > /etc/resolv.conf',
- 'mount /dev/sda1 /tmp',
- 'rm -rf /tmp/*',
- 'depmod -a',
- 'locale-gen en_US.UTF-8',
- 'apt-get update',
- 'apt-get install -y {}'.format(' '.join(packages)),
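+ # Members of the 'tracing' group can use LTTng kernel tracing without being root.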
+ 'groupadd tracing'
]
}
})
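+ # The bare-metal board keeps a scratch disk on /dev/sda1; wipe it between jobs.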
+ if build_device == 'x86':
+ command['parameters']['commands'].extend([
+ 'mount /dev/sda1 /tmp',
+ 'rm -rf /tmp/*'])
+
+ command['parameters']['commands'].extend([
+ 'depmod -a',
+ 'locale-gen en_US.UTF-8',
+ 'apt-get update',
+ 'apt-get upgrade -y',
+ 'apt-get install -y {}'.format(' '.join(packages))
+ ])
return command
-def get_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd():
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
})
return command
-def get_tests_cmd():
+def get_baremetal_tests_cmd():
+ command = OrderedDict({
+ 'command': 'lava_test_shell',
+ 'parameters': {
+ 'testdef_repos': [
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/perf-tests.yml'
+ }
+ ],
+ 'timeout': 18000
+ }
+ })
+ return command
+
+def get_kvm_tests_cmd():
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
'testdef': 'lava/baremetal-tests/kernel-tests.yml'
+ },
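+ # The destructive tests only run in the KVM guest: its rootfs is
+ # re-deployed for every job, so they cannot leave a board in a bad state.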
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/destructive-tests.yml'
}
],
'timeout': 18000
command['parameters']['stream']='/anonymous/'+stream_name+'/'
return command
-def get_deploy_cmd(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
+def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
+ command = OrderedDict({
+ 'command': 'deploy_kernel',
+ 'metadata': {},
+ 'parameters': {
+ 'customize': {},
+ 'kernel': None,
+ 'target_type': 'ubuntu',
+ 'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
+ 'login_prompt': 'kvm02 login:',
+ 'username': 'root'
+ }
+ })
+
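+ # Each 'customize' entry copies its scp:// source into the guest rootfs and unpacks the archive.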
+ command['parameters']['customize'][SCP_PATH+linux_modules_path]=['rootfs:/','archive']
+ command['parameters']['customize'][SCP_PATH+lttng_modules_path]=['rootfs:/','archive']
+ command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
+ command['metadata']['jenkins_jobname'] = jenkins_job
+
+ return command
+
+def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
command = OrderedDict({
'command': 'deploy_kernel',
'metadata': {},
command['parameters']['overlays'].append( str(SCP_PATH+linux_modules_path))
command['parameters']['overlays'].append( str(SCP_PATH+lttng_modules_path))
command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
command['metadata']['jenkins_jobname'] = jenkins_job
if nb_iter is not None:
command['metadata']['nb_iterations'] = nb_iter
return command
-def get_env_setup_cmd(lttng_tools_commit, lttng_ust_commit=None):
+def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
command = OrderedDict({
'command': 'lava_command_run',
'parameters': {
'commands': [
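+ # Upgrade pip first; 'hash -r' makes the shell drop its cached path to the old binary.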
+ 'pip3 install --upgrade pip',
+ 'hash -r',
'git clone https://github.com/frdeso/syscall-bench-it.git bm',
'pip3 install vlttng',
],
}
})
- vlttng_cmd = 'vlttng --jobs=16 --profile urcu-master' \
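+ # vlttng builds a self-contained LTTng environment, pinned here to the commits under test.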
+ vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
' --profile babeltrace-stable-1.4 ' \
' --profile lttng-tools-master' \
' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
' --override projects.lttng-ust.checkout='+lttng_ust_commit+ \
' --profile lttng-ust-no-man-pages'
- vlttng_cmd += " /tmp/virtenv"
+ if build_device == 'kvm':
+ virtenv_path = '/root/virtenv'
+ else:
+ virtenv_path = '/tmp/virtenv'
+
+ vlttng_cmd += ' '+virtenv_path
command['parameters']['commands'].append(vlttng_cmd)
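+ # Give the virtenv a fixed path regardless of device type, and sync so
+ # the build is on disk before the next action runs.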
+ command['parameters']['commands'].append('ln -s '+virtenv_path+' /root/lttngvenv')
+ command['parameters']['commands'].append('sync')
+
return command
def main():
parser.add_argument('-uc', '--ust-commit', required=False)
args = parser.parse_args()
-
- j = create_new_job(args.jobname)
- j['actions'].append(get_deploy_cmd(args.jobname, args.kernel, args.kmodule, args.lmodule))
- j['actions'].append(get_boot_cmd())
- j['actions'].append(get_config_cmd())
-
- if args.type in 'benchmarks':
- test_type = TestType.benchmarks
- elif args.type in 'tests':
- test_type = TestType.tests
+ if args.type == 'baremetal-benchmarks':
+ test_type = TestType.baremetal_benchmarks
+ elif args.type == 'baremetal-tests':
+ test_type = TestType.baremetal_tests
+ elif args.type == 'kvm-tests':
+ test_type = TestType.kvm_tests
else:
print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
return -1
- if test_type is TestType.benchmarks:
- j['actions'].append(get_env_setup_cmd(args.tools_commit))
- j['actions'].append(get_benchmarks_cmd())
+ if test_type is TestType.baremetal_benchmarks:
+ j = create_new_job(args.jobname, build_device='x86')
+ j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+ elif test_type is TestType.baremetal_tests:
+ j = create_new_job(args.jobname, build_device='x86')
+ j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+ elif test_type is TestType.kvm_tests:
+ j = create_new_job(args.jobname, build_device='kvm')
+ j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
+
+ j['actions'].append(get_boot_cmd())
+
+ if test_type is TestType.baremetal_benchmarks:
+ j['actions'].append(get_config_cmd('x86'))
+ j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
+ j['actions'].append(get_baremetal_benchmarks_cmd())
j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
- elif test_type is TestType.tests:
+ elif test_type is TestType.baremetal_tests:
if args.ust_commit is None:
print('Tests runs need -uc/--ust-commit options. Exiting...')
return -1
- j['actions'].append(get_env_setup_cmd(args.tools_commit, args.ust_commit))
- j['actions'].append(get_tests_cmd())
+ j['actions'].append(get_config_cmd('x86'))
+ j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
+ j['actions'].append(get_baremetal_tests_cmd())
+ j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
+ elif test_type is TestType.kvm_tests:
+ if args.ust_commit is None:
+ print('Test runs need the -uc/--ust-commit option. Exiting...')
+ return -1
+ j['actions'].append(get_config_cmd('kvm'))
+ j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
+ j['actions'].append(get_kvm_tests_cmd())
j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
else:
assert False, 'Unknown test type'
jobid = server.scheduler.submit_job(json.dumps(j))
+ print('Lava jobid: {}'.format(jobid))
+
#Check the status of the job every 30 seconds
jobstatus = server.scheduler.job_status(jobid)['job_status']
while jobstatus in 'Submitted' or jobstatus in 'Running':
time.sleep(30)
jobstatus = server.scheduler.job_status(jobid)['job_status']
- print('Job ended with {} status.'.format(jobstatus))
- if jobstatus not in 'Complete':
- return -1
-
passed, failed=check_job_all_test_cases_state_count(server, jobid)
- print('With {} passed tests and {} failed tests.'.format(passed, failed))
-
- if test_type is TestType.tests:
+ if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
print_test_output(server, jobid)
- if failed == 0:
+ print('Job ended with {} status.'.format(jobstatus))
+ if jobstatus != 'Complete':
+ return -1
+ else:
+ print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
+
+ if failed == 0:
return 0
else:
return -1