# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client

from collections import OrderedDict
from enum import Enum

# NOTE: the LAVA account name was not preserved in this extract; the value
# below is a placeholder and must match the account that owns the
# LAVA_JENKINS_TOKEN API key.
USERNAME = 'lava-jenkins'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'


class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
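
# USERNAME and the LAVA_JENKINS_TOKEN API key (read from the environment in
# main()) are combined into the XML-RPC endpoint URL, i.e.
# http://<user>:<token>@<host>/RPC2, as expected by the LAVA v1 API.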

def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # Re-raise: without a bundle there is nothing meaningful to return.
        raise

    return json.loads(bundle['content'])
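
# For reference, the decoded bundle 'content' is shaped roughly like this
# (illustrative sketch, based only on the fields accessed below):
#   {"test_runs": [{"test_id": "...",
#                   "test_results": [{"test_case_id": "...", "result": "pass",
#                                     "attachments": [...]}],
#                   "attachments": [{"pathname": "...", "content": "<base64>"}]}]}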

# Parse the results bundle to check that the run-tests testcase
# of the lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)
    passed_tests = 0
    failed_tests = 0

    # FIXME: Those tests are part of the boot actions and fail randomly, but
    # this doesn't affect the behaviour of the tests. We should update our
    # LAVA installation and try to reproduce it. This error was encountered on
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    # Ignore the flaky boot-action testcases listed above.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally.
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files.
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file in
                                    # the current working directory.
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite.
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # on the lines.
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Create a generator to iterate on the lines while keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # its output from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing once we reach the end of
                                    # the test case.
                                    break
                            print('----- TEST SUITE OUTPUT END -----')
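
# LAVA's test shell wraps every testcase's output in LAVA_SIGNAL_STARTTC /
# LAVA_SIGNAL_ENDTC marker lines; the loops above use those markers to
# bracket and print only the run-tests output.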

def create_new_job(name, build_device):
    # NOTE: the original job dictionary carries additional parameters (e.g.
    # timeouts) that are not recoverable from this extract; only the fields
    # used elsewhere in this script are kept.
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job


def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command
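
# Callers build a complete LAVA v1 job by appending deploy, boot, config,
# test and submit-results commands to job['actions'], then submitting the
# JSON-serialized dictionary to the scheduler (see main() below).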

def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
            ]
        }
    })

    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp'])

    command['parameters']['commands'].extend([
        'locale-gen en_US.UTF-8',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                }
            ]
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ]
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ]
        }
    })
    return command

def get_kprobes_test_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kprobe-fuzzing-tests.yml'
                }
            ]
        }
    })
    return command

def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command
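
# The results are pushed to an anonymous bundle stream named after the test
# type (e.g. /anonymous/benchmark-kernel/); get_job_bundle_content() later
# retrieves that bundle through the dashboard API.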

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command
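
# Note the difference between the two deploys: the KVM image receives the
# module archives through 'customize' rules applied to its root filesystem,
# while the x86 NFS root receives them as 'overlays' laid on top of it.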

def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ]
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --override projects.babeltrace.build-env.PYTHON=python3' \
                 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile babeltrace-python' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command
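
# For an illustrative lttng_tools_commit of 'deadbeef' on a KVM device, the
# appended shell command looks like:
#   vlttng --jobs=$(nproc) --profile urcu-master ... \
#       --override projects.lttng-tools.checkout=deadbeef \
#       --profile lttng-tools-no-man-pages /root/virtenv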

def main():
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()
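
    # Example invocation (illustrative script name, paths and commit ids):
    #   python3 lava-submit.py -t kvm-tests -j my-job-name \
    #       -k kernel/vmlinuz.amd64 -km linux-modules.tar.gz \
    #       -lm lttng-modules.tar.gz -tc <tools-sha1> -uc <ust-sha1>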

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except Exception as e:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_kprobes_test_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in ('Submitted', 'Running'):
        if not_running is False and jobstatus == 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    # Assume the run is successful only if no LAVA test case failed.
    return 0 if failed == 0 else -1

if __name__ == "__main__":
    sys.exit(main())