Merge pull request #40 from frdeso/lava_fuzzing_remove_lttng
[lttng-ci.git] / scripts / system-tests / lava-submit.py
1 #!/usr/bin/python
2 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
3 #
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import argparse
18 import base64
19 import json
20 import os
21 import sys
22 import time
23 import xmlrpc.client
24 from collections import OrderedDict
25 from enum import Enum
26
USERNAME = 'frdeso'  # LAVA account used to authenticate against the master
HOSTNAME = 'lava-master.internal.efficios.com'  # LAVA master XML-RPC endpoint
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'  # artifact storage prefix for kernels/modules
30
class TestType(Enum):
    """Flavors of system-test jobs this script can submit to LAVA."""
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
36
def get_job_bundle_content(server, job):
    """Fetch and deserialize the results bundle of a LAVA job.

    :param server: XML-RPC proxy exposing `scheduler` and `dashboard` APIs.
    :param job: LAVA job id (converted to str for the RPC call).
    :returns: the bundle content parsed from JSON (a dict).
    :raises xmlrpc.client.Fault: if either RPC call fails.
    """
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # Bare `raise` re-raises with the original traceback intact
        # (`raise f` would rebuild it from here).
        raise

    return json.loads(bundle['content'])
46
# Parse the results bundle to see if the run-tests testcases
# of the lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
    """Count passed and failed test cases in a job's results bundle.

    :returns: a ``(passed_tests, failed_tests)`` tuple; testcases in the
        known-flaky list are ignored entirely.
    """
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our Lava
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                # Exact comparison: the previous `result['result'] in 'pass'`
                # was a substring test that also matched '' or 'pas'.
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)
70
# Get the benchmark results from the lava bundle and
# save them as CSV files locally.
def fetch_benchmark_results(server, job):
    """Write each benchmark result attachment of `job` to the CWD."""
    content = get_job_bundle_content(server, job)
    result_files = {'processed_results_close.csv',
                    'processed_results_ioctl.csv',
                    'processed_results_open_efault.csv',
                    'processed_results_open_enoent.csv',
                    'processed_results_dup_close.csv',
                    'processed_results_raw_syscall_getpid.csv',
                    'processed_results_lttng_test_filter.csv'}

    # The result bundle is a large JSON containing the results of every
    # testcase of the LAVA job as well as the files that were attached
    # during the run. Walk it and decode the base64 payload of each
    # benchmark result file into the current working directory.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' not in run['test_id']:
            continue
        for res in run.get('test_results', []):
            for attachment in res.get('attachments', []):
                # We only save the results files.
                if attachment['pathname'] not in result_files:
                    continue
                with open(attachment['pathname'], 'wb') as out:
                    out.write(base64.b64decode(attachment['content']))
101
# Parse the attachment of the testcase to fetch the stdout of the test suite.
def print_test_output(server, job):
    """Print the portion of the test suite's stdout.log delimited by the
    LAVA_SIGNAL_STARTTC/ENDTC run-tests markers.
    """
    content = get_job_bundle_content(server, job)

    for run in content['test_runs']:
        # NOTE(review): exact comparison; the previous `in` substring test
        # would also have matched shorter ids — assumes the bundle's test_id
        # is exactly 'lttng-kernel-test'.
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 payload to text. Decoding (rather
                    # than str()-ifying the bytes object and un-escaping
                    # '\\n' by hand) keeps tabs, quotes and other escape
                    # sequences intact in the printed output.
                    testoutput = base64.b64decode(
                        attachment['content']).decode('utf-8', errors='replace')

                    # Create a generator to iterate on the lines, keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start
                        # printing from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Reached the end of the section.
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break
137
def create_new_job(name, build_device):
    """Return the skeleton of a LAVA job definition.

    :param name: job name shown in the LAVA scheduler.
    :param build_device: target device type ('x86' or 'kvm').
    :returns: an OrderedDict with an empty `actions` list to fill in.
    """
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [ ],
        'timeout': 7200,
        'actions': []
    })
    # Exact comparison: the previous `build_device in 'x86'` was a substring
    # test that also matched '8' or '86'. Baremetal x86 jobs need the node
    # with the /dev/sda1 scratch disk.
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job
151
def get_boot_cmd():
    """Return the LAVA action that boots the deployed image."""
    return OrderedDict(command='boot_image')
157
def get_config_cmd(build_device):
    """Return the action that configures the freshly booted system:
    DNS, tracing group, scratch-storage mount (x86 only), and installation
    of the build/test dependencies.

    :param build_device: target device type ('x86' or 'kvm').
    """
    packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
            'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
            'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout':300
        }
    })
    # Exact comparison: the previous `build_device in 'x86'` was a substring
    # test that also matched '8' or '86'.
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command
187
def get_baremetal_benchmarks_cmd():
    """Return the lava_test_shell action running the syscall benchmarks."""
    # Every testdef comes from the same lttng-ci repository at the same
    # revision; only the yml filename varies.
    testdef_names = [
        'failing-close.yml',
        'failing-ioctl.yml',
        'failing-open-efault.yml',
        'success-dup-close.yml',
        'raw-syscall-getpid.yml',
        'failing-open-enoent.yml',
        'lttng-test-filter.yml',
    ]
    repos = [
        {
            'git-repo': 'https://github.com/lttng/lttng-ci.git',
            'revision': 'master',
            'testdef': 'lava/system-tests/' + name,
        }
        for name in testdef_names
    ]
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': repos,
            'timeout': 7200
        }
    })
233
def get_baremetal_tests_cmd():
    """Return the lava_test_shell action running the perf tests on baremetal."""
    command = OrderedDict()
    command['command'] = 'lava_test_shell'
    command['parameters'] = {
        'testdef_repos': [
            {
                'git-repo': 'https://github.com/lttng/lttng-ci.git',
                'revision': 'master',
                'testdef': 'lava/system-tests/perf-tests.yml',
            },
        ],
        'timeout': 3600,
    }
    return command
249
def get_kvm_tests_cmd():
    """Return the lava_test_shell action running the kernel and
    destructive test suites inside the KVM guest."""
    ci_repo = 'https://github.com/lttng/lttng-ci.git'
    repos = []
    for testdef in ('lava/system-tests/kernel-tests.yml',
                    'lava/system-tests/destructive-tests.yml'):
        repos.append({
            'git-repo': ci_repo,
            'revision': 'master',
            'testdef': testdef,
        })
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': repos,
            'timeout': 7200
        }
    })
270
def get_kprobes_generate_data_cmd():
    """Return the lava_test_shell action generating kprobe fuzzing data."""
    command = OrderedDict()
    command['command'] = 'lava_test_shell'
    command['parameters'] = {
        'testdef_repos': [
            {
                'git-repo': 'https://github.com/lttng/lttng-ci.git',
                'revision': 'master',
                'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
            },
        ],
        # Data generation is quick; keep a short timeout.
        'timeout': 60,
    }
    return command
286
def get_kprobes_test_cmd():
    """Return the lava_test_shell action running the kprobe fuzzing tests."""
    command = OrderedDict()
    command['command'] = 'lava_test_shell'
    command['parameters'] = {
        'testdef_repos': [
            {
                'git-repo': 'https://github.com/lttng/lttng-ci.git',
                'revision': 'master',
                'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
            },
        ],
        'timeout': 7200,
    }
    return command
302
def get_results_cmd(stream_name):
    """Return the action submitting the results to the anonymous stream
    named `stream_name` on the LAVA master."""
    return OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/',
            'stream': '/anonymous/{}/'.format(stream_name),
        }
    })
312
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    """Return the deploy_kernel action for a KVM job: kernel plus the Linux
    and LTTng module archives, layered onto the xenial rootfs image."""
    # Both module archives are unpacked onto the rootfs.
    customize = {
        SCP_PATH + linux_modules_path: ['rootfs:/', 'archive'],
        SCP_PATH + lttng_modules_path: ['rootfs:/', 'archive'],
    }
    return OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {'jenkins_jobname': jenkins_job},
        'parameters': {
            'customize': customize,
            'kernel': str(SCP_PATH + kernel_path),
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })
333
def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    """Return the deploy_kernel action for a baremetal x86 job: kernel,
    module overlays and the NFS rootfs. `nb_iter`, when given, is recorded
    in the job metadata as the benchmark iteration count."""
    metadata = {'jenkins_jobname': jenkins_job}
    if nb_iter is not None:
        metadata['nb_iterations'] = nb_iter

    return OrderedDict({
        'command': 'deploy_kernel',
        'metadata': metadata,
        'parameters': {
            'overlays': [
                str(SCP_PATH + linux_modules_path),
                str(SCP_PATH + lttng_modules_path),
            ],
            'kernel': str(SCP_PATH + kernel_path),
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })
354
355
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    """Return the action that installs vlttng and builds the LTTng stack.

    :param build_device: target device type ('x86' or 'kvm'); selects where
        the virtual environment is created.
    :param lttng_tools_commit: lttng-tools commit to check out.
    :param lttng_ust_commit: optional lttng-ust commit; when given, userspace
        tracing profiles are added to the vlttng invocation.
    """
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'pip3 install vlttng',
            ],
            'timeout': 3600
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                    ' --override projects.babeltrace.build-env.PYTHON=python3' \
                    ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                    ' --profile babeltrace-stable-1.4' \
                    ' --profile babeltrace-python' \
                    ' --profile lttng-tools-master' \
                    ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
                    ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
                    ' --override projects.lttng-ust.checkout='+lttng_ust_commit+ \
                    ' --profile lttng-ust-no-man-pages'

    # Exact comparison: the previous `build_device in 'kvm'` was a substring
    # test that also matched 'k' or 'vm'. The KVM guest has a persistent
    # /root; baremetal only has scratch space under /tmp.
    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' '+virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    # Stable symlink so the test definitions find the venv regardless of device.
    command['parameters']['commands'].append('ln -s '+virtenv_path+' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command
396
def main():
    """Build a LAVA job definition from the command line, submit it, poll it
    to completion, fetch its artifacts and return 0 on full success, -1 on
    any failure (bad arguments, missing token, incomplete job, failed tests).
    """
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    # Map the command-line type to the internal enum. Exact comparisons:
    # the previous `args.type in 'baremetal-benchmarks'` substring tests
    # also accepted partial values such as 'baremetal' or ''.
    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    elif args.type == 'kvm-fuzzing-tests':
        test_type = TestType.kvm_fuzzing_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    # The Jenkins job injects the LAVA API token through the environment.
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
        return -1

    # Deploy step: baremetal flavors target the x86 node, the KVM flavors
    # target the virtual device.
    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    # Per-flavor configuration, test-run and result-submission steps.
    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_fuzzing_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_kprobes_generate_data_cmd())
        j['actions'].append(get_kprobes_test_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds. Tuple membership replaces
    # the previous substring tests (`jobstatus in 'Submitted'`), which would
    # have looped forever on an empty status string.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    started_running = False
    while jobstatus in ('Submitted', 'Running'):
        if not started_running and jobstatus == 'Running':
            print('Job started running')
            started_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    # Fetch the flavor-specific artifacts from the results bundle.
    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1

    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    return -1

if __name__ == "__main__":
    sys.exit(main())
This page took 0.042001 seconds and 5 git commands to generate.