import os
import sys
import time
-import xmlrpclib
+import xmlrpc.client
from collections import OrderedDict
from enum import Enum
baremetal_benchmarks=1
baremetal_tests=2
kvm_tests=3
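+ # Kprobe fuzzing tests; they run on the 'kvm' device type like the
+ # regular KVM tests (see get_kprobes_test_cmd() below).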
+ kvm_fuzzing_tests=4
def get_job_bundle_content(server, job):
try:
bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
bundle = server.dashboard.get(bundle_sha)
- except Fault as f:
- print 'Error while fetching results bundle', f
+ except xmlrpc.client.Fault as f:
+ print('Error while fetching results bundle', f.faultString)
+ raise
return json.loads(bundle['content'])
def check_job_all_test_cases_state_count(server, job):
content = get_job_bundle_content(server, job)
+ # FIXME: These tests are part of the boot actions and fail randomly, but
+ # this does not affect the behaviour of the tests themselves. We should
+ # update our LAVA installation and try to reproduce the issue. This error
+ # was encountered on Ubuntu 16.04.
+ tests_known_to_fail=['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']
+
passed_tests=0
failed_tests=0
for run in content['test_runs']:
if 'test_case_id' in result:
if result['result'] in 'pass':
passed_tests+=1
- elif result['test_case_id'] in 'wait_for_test_image_prompt':
- # FIXME:This test is part of the boot action and fails
- # randomly but doesn't affect the behaviour of the tests.
- # No reply on the Lava IRC channel yet. We should update
- # our Lava installation and try to reproduce it. This error
- # was encountered ont the KVM trusty image only. Not seen
- # on Xenial at this point.
+ elif result['test_case_id'] in tests_known_to_fail:
pass
else:
failed_tests+=1
# save them as CSV files locally
def fetch_benchmark_results(server, job):
content = get_job_bundle_content(server, job)
- testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+ testcases = ['processed_results_close.csv',
+ 'processed_results_ioctl.csv',
+ 'processed_results_open_efault.csv',
+ 'processed_results_open_enoent.csv',
+ 'processed_results_dup_close.csv',
+ 'processed_results_raw_syscall_getpid.csv',
+ 'processed_results_lttng_test_filter.csv']
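+ # Only attachments whose pathname matches one of these processed-results
+ # files are saved to the current working directory.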
# The result bundle is a large JSON containing the results of every testcase
# of the LAVA job as well as the files that were attached during the run,
# among them the benchmark results we want to save.
for run in content['test_runs']:
# We only care about the benchmark testcases
- if 'benchmark-syscall-' in run['test_id']:
+ if 'benchmark-' in run['test_id']:
if 'test_results' in run:
for res in run['test_results']:
if 'attachments' in res:
for a in res['attachments']:
# We only save the results file
if a['pathname'] in testcases:
- with open(a['pathname'],'w') as f:
+ with open(a['pathname'],'wb') as f:
# Convert the b64 representation of the
# result file and write it to a file
# in the current working directory
# Decode the base64 file and split on newlines to iterate
# on list
- testoutput = base64.b64decode(attachment['content']).split('\n')
+ testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')
# Create a generator to iterate over the lines, keeping the state of the
# iterator across the two loops.
job = OrderedDict({
'health_check': False,
'job_name': name,
- 'device_type':build_device,
+ 'device_type': build_device,
'tags': [ ],
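+ # LAVA timeouts are expressed in seconds (7200 s = 2 hours).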
- 'timeout': 18000,
+ 'timeout': 7200,
'actions': []
})
if build_device in 'x86':
def get_config_cmd(build_device):
packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
- 'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
+ 'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+ 'libnuma-dev', 'python3-dev', 'swig', 'stress']
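+ # python3-dev and swig are needed to build the Babeltrace Python bindings
+ # (see the babeltrace-python vlttng profile); libnuma-dev and stress are
+ # assumed to be dependencies of the new tests.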
command = OrderedDict({
'command': 'lava_command_run',
'parameters': {
'cat /etc/resolv.conf',
'echo nameserver 172.18.0.12 > /etc/resolv.conf',
'groupadd tracing'
- ]
+ ],
+ 'timeout': 300
}
})
if build_device in 'x86':
'revision': 'master',
'testdef': 'lava/baremetal-tests/failing-close.yml'
},
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
+ },
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
},
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/success-dup-close.yml'
+ },
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
+ },
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
+ },
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
}
],
- 'timeout': 18000
+ 'timeout': 7200
}
})
return command
'testdef': 'lava/baremetal-tests/perf-tests.yml'
}
],
- 'timeout': 18000
+ 'timeout': 3600
}
})
return command
'testdef': 'lava/baremetal-tests/destructive-tests.yml'
}
],
- 'timeout': 18000
+ 'timeout': 7200
+ }
+ })
+ return command
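+# Build the lava_test_shell action that runs the kprobe fuzzing testdef
+# from the lttng-ci repository.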
+def get_kprobes_test_cmd():
+ command = OrderedDict({
+ 'command': 'lava_test_shell',
+ 'parameters': {
+ 'testdef_repos': [
+ {
+ 'git-repo': 'https://github.com/lttng/lttng-ci.git',
+ 'revision': 'master',
+ 'testdef': 'lava/baremetal-tests/kprobe-fuzzing-tests.yml'
+ }
+ ],
+ 'timeout': 7200
}
})
return command
'commands': [
'pip3 install --upgrade pip',
'hash -r',
- 'git clone https://github.com/frdeso/syscall-bench-it.git bm',
'pip3 install vlttng',
],
- 'timeout': 18000
+ 'timeout': 3600
}
})
vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
- ' --profile babeltrace-stable-1.4 ' \
+ ' --override projects.babeltrace.build-env.PYTHON=python3' \
+ ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
+ ' --profile babeltrace-stable-1.4' \
+ ' --profile babeltrace-python' \
' --profile lttng-tools-master' \
' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
' --profile lttng-tools-no-man-pages'
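+ # The PYTHON/PYTHON_CONFIG overrides above make the Babeltrace build use
+ # python3, so the babeltrace-python profile builds its Python bindings
+ # against the python3 packages installed by get_config_cmd().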
parser.add_argument('-k', '--kernel', required=True)
parser.add_argument('-km', '--kmodule', required=True)
parser.add_argument('-lm', '--lmodule', required=True)
- parser.add_argument('-l', '--lava-key', required=True)
parser.add_argument('-tc', '--tools-commit', required=True)
parser.add_argument('-uc', '--ust-commit', required=False)
args = parser.parse_args()
test_type = TestType.baremetal_tests
elif args.type in 'kvm-tests':
test_type = TestType.kvm_tests
+ elif args.type in 'kvm-fuzzing-tests':
+ test_type = TestType.kvm_fuzzing_tests
else:
print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
return -1
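+ # Fetch the LAVA API token from the LAVA_JENKINS_TOKEN environment
+ # variable rather than from a command-line argument, presumably so the
+ # secret does not appear in the process arguments.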
+ lava_api_key = None
+ try:
+ lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
+ except KeyError:
+ print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
+ return -1
+
if test_type is TestType.baremetal_benchmarks:
j = create_new_job(args.jobname, build_device='x86')
j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
elif test_type is TestType.baremetal_tests:
j = create_new_job(args.jobname, build_device='x86')
j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
- elif test_type is TestType.kvm_tests:
+ elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
j = create_new_job(args.jobname, build_device='kvm')
j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
j['actions'].append(get_kvm_tests_cmd())
j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
+ elif test_type is TestType.kvm_fuzzing_tests:
+ if args.ust_commit is None:
+ print('Test runs need the -uc/--ust-commit option. Exiting...')
+ return -1
+ j['actions'].append(get_config_cmd('kvm'))
+ j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
+ j['actions'].append(get_kprobes_test_cmd())
+ j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
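+ # Same deploy, config and environment setup as the regular KVM tests;
+ # only the test shell command differs (get_kprobes_test_cmd instead of
+ # get_kvm_tests_cmd).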
else:
assert False, 'Unknown test type'
- server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+ server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
jobid = server.scheduler.submit_job(json.dumps(j))
print('Lava jobid:{}'.format(jobid))
+ print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))
# Check the status of the job every 30 seconds
jobstatus = server.scheduler.job_status(jobid)['job_status']
time.sleep(30)
jobstatus = server.scheduler.job_status(jobid)['job_status']
- passed, failed=check_job_all_test_cases_state_count(server, jobid)
-
if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
print_test_output(server, jobid)
elif test_type is TestType.baremetal_benchmarks:
if jobstatus not in 'Complete':
return -1
else:
+ passed, failed = check_job_all_test_cases_state_count(server, jobid)
print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
- if failed == 0:
- return 0
- else:
- return -1
+ if failed == 0:
+ return 0
+ else:
+ return -1
if __name__ == "__main__":
sys.exit(main())