jjb: lava: kprobe-fuzzing: Add random seed param for generation script
[lttng-ci.git] / scripts / system-tests / lava-submit.py
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import random
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4

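# Fetch the results bundle of a LAVA job from the server and return its JSON
# content as a dict.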
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise f

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests testcases of the
# lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    # FIXME: These tests are part of the boot actions and fail randomly, but
    # this doesn't affect the behaviour of the tests themselves. We should
    # update our LAVA installation and try to reproduce it. This error was
    # encountered on Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and
# save them locally as CSV files.
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files.
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file in
                                    # the current working directory.
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite.
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64-encoded log so we can split it on
                    # newlines and iterate on the lines.
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8')

                    # Create an iterator on the lines so that the iterator
                    # state is kept across the two loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing when we reach the end of
                                    # the section.
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break

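# Create the base skeleton of a LAVA job definition for the given device type.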
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 7200,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

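# Return the LAVA action that boots the deployed kernel image.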
def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

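# Return the LAVA action that configures the target system: set a known DNS
# server, create the tracing group and install the required packages.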
def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

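# Return the LAVA test-shell action that runs the benchmark testdefs from the
# lttng-ci repository.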
def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-ioctl.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/raw-syscall-getpid.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

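# Return the LAVA test-shell action that runs the perf test suite on the
# baremetal device.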
def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/perf-tests.yml'
                }
            ],
            'timeout': 3600
        }
    })
    return command

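# Return the LAVA test-shell action that runs the kernel and destructive test
# suites in the KVM guest.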
def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/destructive-tests.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

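# Return the LAVA test-shell action that runs the kprobe fuzzing data
# generation script. The script is passed a random seed so that each job
# generates a different set of kprobe instrumentation points.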
def get_kprobes_generate_data_cmd():
    random_seed = random.randint(0, 1000000)
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
                    'parameters': {'RANDOM_SEED': str(random_seed)}
                }
            ],
            'timeout': 60
        }
    })
    return command

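# Return the LAVA test-shell action that runs one round of the kprobe fuzzing
# tests on the generated data.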
def get_kprobes_test_cmd(round_nb):
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
                    'parameters': {'ROUND_NB': str(round_nb)}
                }
            ],
            'timeout': 1000
        }
    })
    return command

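# Return the LAVA action that submits the job results to the given anonymous
# stream on the LAVA server.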
def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

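# Return the LAVA action that deploys the kernel on the KVM device and copies
# the Linux and LTTng modules archives into the guest root filesystem.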
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

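# Return the LAVA action that deploys the kernel on the x86 baremetal device,
# with the modules archives applied as overlays on the NFS root filesystem.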
def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command


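# Return the LAVA action that builds the LTTng environment on the target using
# vlttng, checking out the requested lttng-tools and (optionally) lttng-ust
# commits, and symlinks the resulting virtual environment to /root/lttngvenv.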
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'pip3 install vlttng',
            ],
            'timeout': 3600
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --override projects.babeltrace.build-env.PYTHON=python3' \
                 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile babeltrace-python' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    elif args.type == 'kvm-fuzzing-tests':
        test_type = TestType.kvm_fuzzing_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
        return -1

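    # Assemble the job definition: deploy the kernel and modules, boot the
    # image, then append the configuration, environment setup and test actions
    # for the selected test type.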
    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_fuzzing_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_kprobes_generate_data_cmd())
        for i in range(10):
            j['actions'].append(get_kprobes_test_cmd(round_nb=i))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid: {}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    job_started = False
    while jobstatus in ('Submitted', 'Running'):
        if not job_started and jobstatus == 'Running':
            print('Job started running')
            job_started = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

        if failed == 0:
            return 0
        else:
            return -1

if __name__ == "__main__":
    sys.exit(main())