jjb: lava: kprobe-fuzzing: Fix: clear events before next iteration
[lttng-ci.git] / scripts / system-tests / lava-submit.py
... / ...
CommitLineData
1#!/usr/bin/python
2# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
3#
4# This program is free software: you can redistribute it and/or modify
5# it under the terms of the GNU General Public License as published by
6# the Free Software Foundation, either version 3 of the License, or
7# (at your option) any later version.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU General Public License for more details.
13#
14# You should have received a copy of the GNU General Public License
15# along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17import argparse
18import base64
19import json
20import os
21import sys
22import time
23import xmlrpc.client
24from collections import OrderedDict
25from enum import Enum
26
# LAVA user account used to authenticate against the LAVA master's XML-RPC API.
USERNAME = 'frdeso'
# Hostname of the LAVA master that schedules and runs the jobs.
HOSTNAME = 'lava-master.internal.efficios.com'
# Base SCP URL of the storage server holding the kernel/module artifacts.
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
30
class TestType(Enum):
    """Flavours of system-test jobs this script can submit to LAVA."""
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
36
def get_job_bundle_content(server, job):
    """Fetch and decode the results bundle of a LAVA job.

    :param server: XML-RPC proxy (or compatible object) to the LAVA master.
    :param job: LAVA job id; converted to str for the scheduler call.
    :returns: the bundle content parsed from its JSON representation.
    :raises xmlrpc.client.Fault: if the master rejects either RPC call.
    """
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # Bare `raise` (instead of `raise f`) preserves the original traceback.
        raise

    return json.loads(bundle['content'])
46
# Parse the results bundle to check whether the run-tests testcases
# of the lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
    """Count the passed and failed testcases of a LAVA job.

    :param server: XML-RPC proxy to the LAVA master.
    :param job: LAVA job id.
    :returns: a (passed_tests, failed_tests) tuple of counts.
    """
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our LAVA
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                # Strict equality: the previous `result['result'] in 'pass'`
                # was a substring test that would also count results such as
                # 'a' or 'as' as passed.
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    # Known-flaky boot-action tests are ignored.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)
70
# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally.
def fetch_benchmark_results(server, job):
    """Extract the benchmark CSV attachments of a job into the CWD."""
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files attached during
    # the run. Walk it to find the base64 payload of each benchmark result
    # file produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' not in run['test_id']:
            continue
        for res in run.get('test_results', []):
            for attachment in res.get('attachments', []):
                # Only the known result files are saved.
                if attachment['pathname'] not in testcases:
                    continue
                # Decode the base64 representation of the result file and
                # write it under the same name in the current directory.
                with open(attachment['pathname'], 'wb') as out:
                    out.write(base64.b64decode(attachment['content']))
101
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    """Print the run-tests section of the job's stdout.log to stdout.

    Fetches the job's results bundle, locates the 'stdout.log' attachment of
    the 'lttng-kernel-test' run and prints the lines found between the
    'LAVA_SIGNAL_STARTTC run-tests' and 'LAVA_SIGNAL_ENDTC run-tests'
    markers.
    """
    content = get_job_bundle_content(server, job)
    # NOTE(review): `found` is assigned but never used; kept as-is in this
    # documentation-only pass.
    found = False

    for run in content['test_runs']:
        # NOTE(review): `in` is a substring test here, not an equality test —
        # it also matches any substring of 'lttng-kernel-test'; presumably
        # real test_id values make this safe, but verify.
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # on list. str() of the decoded bytes yields a "b'...'"
                    # literal containing escaped '\n' sequences.
                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))

                    # Turn the literal '\n' escape sequences back into real
                    # newlines before splitting.
                    testoutput = testoutput.replace('\\n', '\n')

                    # Create a generator to iterate on the lines, keeping
                    # the state of the iterator across the two loops below.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            # The shared iterator resumes right after the
                            # header line matched above.
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # section.
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break
137
def create_new_job(name, build_device):
    """Create the skeleton of a LAVA job description.

    :param name: job name shown in the LAVA scheduler.
    :param build_device: target device type ('x86' or 'kvm').
    :returns: an OrderedDict job description with an empty 'actions' list
        to be filled in by the caller.
    """
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 7200,
        'actions': []
    })
    # Strict equality: the previous `build_device in 'x86'` was a substring
    # test and would also match e.g. 'x8' or '86'.
    if build_device == 'x86':
        # The x86 device has a dedicated scratch partition.
        job['tags'].append('dev-sda1')

    return job
151
def get_boot_cmd():
    """Return the LAVA action that boots the deployed image."""
    return OrderedDict({'command': 'boot_image'})
157
def get_config_cmd(build_device):
    """Return the lava_command_run action that configures the target.

    Sets up DNS, creates the 'tracing' group and installs the build/test
    dependency packages. On the x86 device, additionally mounts and wipes
    the scratch partition.

    :param build_device: target device type ('x86' or 'kvm').
    """
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    # Strict equality: the previous `build_device in 'x86'` was a substring
    # test and would also match e.g. 'x8' or '86'.
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command
187
def get_baremetal_benchmarks_cmd():
    """Return the lava_test_shell action running the benchmark testdefs."""
    testdefs = [
        'lava/system-tests/failing-close.yml',
        'lava/system-tests/failing-ioctl.yml',
        'lava/system-tests/failing-open-efault.yml',
        'lava/system-tests/success-dup-close.yml',
        'lava/system-tests/raw-syscall-getpid.yml',
        'lava/system-tests/failing-open-enoent.yml',
        'lava/system-tests/lttng-test-filter.yml',
    ]
    # Every test definition comes from the master branch of the lttng-ci repo.
    repos = [
        {
            'git-repo': 'https://github.com/lttng/lttng-ci.git',
            'revision': 'master',
            'testdef': testdef
        }
        for testdef in testdefs
    ]
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': repos,
            'timeout': 7200
        }
    })
233
def get_baremetal_tests_cmd():
    """Return the lava_test_shell action running the perf test definition."""
    perf_testdef = {
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/system-tests/perf-tests.yml'
    }
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [perf_testdef],
            'timeout': 3600
        }
    })
249
def get_kvm_tests_cmd():
    """Return the lava_test_shell action running the KVM kernel test suites."""
    testdef_repos = []
    for testdef in ('lava/system-tests/kernel-tests.yml',
                    'lava/system-tests/destructive-tests.yml'):
        testdef_repos.append({
            'git-repo': 'https://github.com/lttng/lttng-ci.git',
            'revision': 'master',
            'testdef': testdef
        })
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': testdef_repos,
            'timeout': 7200
        }
    })
270
def get_kprobes_generate_data_cmd():
    """Return the lava_test_shell action generating kprobe fuzzing data."""
    generate_testdef = {
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml'
    }
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [generate_testdef],
            'timeout': 60
        }
    })
286
def get_kprobes_test_cmd(round_nb):
    """Return the lava_test_shell action for one kprobe fuzzing round.

    :param round_nb: fuzzing round number, forwarded (stringified) as the
        ROUND_NB parameter of the test definition.
    """
    fuzzing_testdef = {
        'git-repo': 'https://github.com/lttng/lttng-ci.git',
        'revision': 'master',
        'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
        'parameters': {'ROUND_NB': str(round_nb)}
    }
    return OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [fuzzing_testdef],
            'timeout': 1000
        }
    })
303
def get_results_cmd(stream_name):
    """Return the submit_results action posting to the given anonymous stream.

    :param stream_name: name of the anonymous LAVA bundle stream.
    """
    return OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/',
            'stream': '/anonymous/' + stream_name + '/'
        }
    })
313
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    """Return the deploy_kernel action for the KVM device.

    The linux and lttng module archives are unpacked into the rootfs via the
    'customize' map; the kernel is fetched over SCP.
    """
    # Both module archives are extracted at the root of the rootfs.
    customize = {
        SCP_PATH + linux_modules_path: ['rootfs:/', 'archive'],
        SCP_PATH + lttng_modules_path: ['rootfs:/', 'archive'],
    }
    return OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {'jenkins_jobname': jenkins_job},
        'parameters': {
            'customize': customize,
            'kernel': str(SCP_PATH + kernel_path),
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })
334
def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    """Return the deploy_kernel action for the x86 device.

    The module archives are deployed as overlays over an NFS root filesystem;
    the kernel is fetched over SCP.

    :param nb_iter: optional iteration count recorded in the job metadata.
    """
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {'jenkins_jobname': jenkins_job},
        'parameters': {
            'overlays': [
                str(SCP_PATH + linux_modules_path),
                str(SCP_PATH + lttng_modules_path),
            ],
            'kernel': str(SCP_PATH + kernel_path),
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command
355
356
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    """Return the lava_command_run action that builds the test environment.

    Installs vlttng on the target and uses it to build an LTTng virtual
    environment from the given lttng-tools (and, optionally, lttng-ust)
    commits. The virtualenv lives under /root on KVM, /tmp elsewhere, and is
    always symlinked at /root/lttngvenv.

    :param build_device: 'kvm' or 'x86'; selects the virtualenv location.
    :param lttng_tools_commit: lttng-tools commit to check out.
    :param lttng_ust_commit: optional lttng-ust commit to check out.
    """
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'pip3 install vlttng',
            ],
            'timeout': 3600
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
        ' --override projects.babeltrace.build-env.PYTHON=python3' \
        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
        ' --profile babeltrace-stable-1.4' \
        ' --profile babeltrace-python' \
        ' --profile lttng-tools-master' \
        ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
        ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
            ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
            ' --profile lttng-ust-no-man-pages'

    # Strict equality: the previous `build_device in 'kvm'` was a substring
    # test (e.g. '' or 'k' would also have selected the KVM path).
    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command
397
def main():
    """Submit a system-test job to the LAVA master and wait for completion.

    :returns: 0 when the job completed with no failed testcases, -1 on any
        error (bad arguments, missing token, incomplete job or failed tests).
    """
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    # Strict equality: the former `args.type in '...'` substring checks
    # accepted any substring of the type names (e.g. '-t kvm' silently
    # selected kvm-tests).
    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    elif args.type == 'kvm-fuzzing-tests':
        test_type = TestType.kvm_fuzzing_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        # Narrowed from `except Exception`: only a missing variable is
        # expected here.
        print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
        return -1

    # Build the job skeleton and the deploy action matching the test type.
    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    # Append the configuration, environment-setup, test and result-submission
    # actions specific to each test type.
    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_fuzzing_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_kprobes_generate_data_cmd())
        # Run ten fuzzing rounds against the generated data.
        for i in range(10):
            j['actions'].append(get_kprobes_test_cmd(round_nb=i))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Poll the status of the job every 30 seconds until it leaves the
    # Submitted/Running states.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in ('Submitted', 'Running'):
        if not_running is False and jobstatus == 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1

    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    return -1
507
if __name__ == "__main__":
    # Propagate main()'s return value (0 or -1) as the process exit status.
    sys.exit(main())
This page took 0.025162 seconds and 4 git commands to generate.