import base64
import json
import os
+import random
import sys
import time
import xmlrpc.client
+from urllib.parse import urljoin
+from urllib.request import urlretrieve
from collections import OrderedDict
from enum import Enum
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
+OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
class TestType(Enum):
baremetal_benchmarks=1
-# Get the benchmark results from the lava bundle
-# save them as CSV files localy
+# Get the benchmark results for this build from the object store and
+# save them as CSV files locally
-def fetch_benchmark_results(server, job):
- content = get_job_bundle_content(server, job)
+def fetch_benchmark_results(build_id):
testcases = ['processed_results_close.csv',
'processed_results_ioctl.csv',
'processed_results_open_efault.csv',
'processed_results_dup_close.csv',
'processed_results_raw_syscall_getpid.csv',
'processed_results_lttng_test_filter.csv']
-
- # The result bundle is a large JSON containing the results of every testcase
- # of the LAVA job as well as the files that were attached during the run.
- # We need to iterate over this JSON to get the base64 representation of the
- # benchmark results produced during the run.
- for run in content['test_runs']:
- # We only care of the benchmark testcases
- if 'benchmark-' in run['test_id']:
- if 'test_results' in run:
- for res in run['test_results']:
- if 'attachments' in res:
- for a in res['attachments']:
- # We only save the results file
- if a['pathname'] in testcases:
- with open(a['pathname'],'wb') as f:
- # Convert the b64 representation of the
- # result file and write it to a file
- # in the current working directory
- f.write(base64.b64decode(a['content']))
+ for testcase in testcases:
+ url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
+ urlretrieve(url, testcase)
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
])
return command
-def get_baremetal_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd(build_id):
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/failing-close.yml'
+ 'testdef': 'lava/system-tests/failing-close.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/failing-ioctl.yml'
+ 'testdef': 'lava/system-tests/failing-ioctl.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/failing-open-efault.yml'
+ 'testdef': 'lava/system-tests/failing-open-efault.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/success-dup-close.yml'
+ 'testdef': 'lava/system-tests/success-dup-close.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/raw-syscall-getpid.yml'
+ 'testdef': 'lava/system-tests/raw-syscall-getpid.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/failing-open-enoent.yml'
+ 'testdef': 'lava/system-tests/failing-open-enoent.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/lttng-test-filter.yml'
+ 'testdef': 'lava/system-tests/lttng-test-filter.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
}
],
'timeout': 7200
})
return command
-def get_baremetal_tests_cmd():
+def get_baremetal_tests_cmd(build_id):
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/perf-tests.yml'
+ 'testdef': 'lava/system-tests/perf-tests.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
}
],
'timeout': 3600
})
return command
-def get_kvm_tests_cmd():
+def get_kvm_tests_cmd(build_id):
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/kernel-tests.yml'
+ 'testdef': 'lava/system-tests/kernel-tests.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/destructive-tests.yml'
+ 'testdef': 'lava/system-tests/destructive-tests.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
}
],
'timeout': 7200
return command
def get_kprobes_generate_data_cmd():
+ random_seed = random.randint(0, 1000000)
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml'
+ 'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
+ 'parameters': { 'RANDOM_SEED': str(random_seed) }
}
],
'timeout': 60
})
return command
-def get_kprobes_test_cmd():
+def get_kprobes_test_cmd(round_nb):
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml'
+ 'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
+ 'parameters': { 'ROUND_NB': str(round_nb) }
}
- ],
- 'timeout': 7200
+ ],
+ 'timeout': 1000
}
})
return command
command['parameters']['stream']='/anonymous/'+stream_name+'/'
return command
-def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
+def get_deploy_cmd_kvm(jenkins_job, kernel_path, lttng_modules_path):
command = OrderedDict({
'command': 'deploy_kernel',
'metadata': {},
}
})
- command['parameters']['customize'][SCP_PATH+linux_modules_path]=['rootfs:/','archive']
command['parameters']['customize'][SCP_PATH+lttng_modules_path]=['rootfs:/','archive']
command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
command['metadata']['jenkins_jobname'] = jenkins_job
return command
-def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
+def get_deploy_cmd_x86(jenkins_job, kernel_path, lttng_modules_path, nb_iter=None):
command = OrderedDict({
'command': 'deploy_kernel',
'metadata': {},
}
})
- command['parameters']['overlays'].append( str(SCP_PATH+linux_modules_path))
command['parameters']['overlays'].append( str(SCP_PATH+lttng_modules_path))
command['parameters']['kernel'] = str(SCP_PATH+kernel_path)
command['metadata']['jenkins_jobname'] = jenkins_job
parser.add_argument('-t', '--type', required=True)
parser.add_argument('-j', '--jobname', required=True)
parser.add_argument('-k', '--kernel', required=True)
- parser.add_argument('-km', '--kmodule', required=True)
parser.add_argument('-lm', '--lmodule', required=True)
parser.add_argument('-tc', '--tools-commit', required=True)
+ parser.add_argument('-id', '--build-id', required=True)
parser.add_argument('-uc', '--ust-commit', required=False)
args = parser.parse_args()
if test_type is TestType.baremetal_benchmarks:
j = create_new_job(args.jobname, build_device='x86')
- j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+ j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
elif test_type is TestType.baremetal_tests:
j = create_new_job(args.jobname, build_device='x86')
- j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
+ j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
j = create_new_job(args.jobname, build_device='kvm')
- j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))
+ j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.lmodule))
j['actions'].append(get_boot_cmd())
if test_type is TestType.baremetal_benchmarks:
j['actions'].append(get_config_cmd('x86'))
j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
- j['actions'].append(get_baremetal_benchmarks_cmd())
+ j['actions'].append(get_baremetal_benchmarks_cmd(args.build_id))
j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
elif test_type is TestType.baremetal_tests:
if args.ust_commit is None:
return -1
j['actions'].append(get_config_cmd('x86'))
j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
- j['actions'].append(get_baremetal_tests_cmd())
+ j['actions'].append(get_baremetal_tests_cmd(args.build_id))
j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
elif test_type is TestType.kvm_tests:
if args.ust_commit is None:
return -1
j['actions'].append(get_config_cmd('kvm'))
j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
- j['actions'].append(get_kvm_tests_cmd())
+ j['actions'].append(get_kvm_tests_cmd(args.build_id))
j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
elif test_type is TestType.kvm_fuzzing_tests:
if args.ust_commit is None:
print('Tests runs need -uc/--ust-commit options. Exiting...')
return -1
j['actions'].append(get_config_cmd('kvm'))
- j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
j['actions'].append(get_kprobes_generate_data_cmd())
- j['actions'].append(get_kprobes_test_cmd())
+ for i in range(10):
+ j['actions'].append(get_kprobes_test_cmd(round_nb=i))
j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
else:
assert False, 'Unknown test type'
if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
print_test_output(server, jobid)
elif test_type is TestType.baremetal_benchmarks:
- fetch_benchmark_results(server, jobid)
+ fetch_benchmark_results(args.build_id)
print('Job ended with {} status.'.format(jobstatus))
if jobstatus not in 'Complete':