Lava v2: initial submit script
scripts/system-tests/lava-v2-submit.py (lttng-ci.git)
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import random
import sys
import time
import xmlrpc.client
from collections import OrderedDict

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

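# Example invocation (illustrative; the artifact paths and commit ids are
# hypothetical -- in practice they are provided by the Jenkins job):
#
#   LAVA_JENKINS_TOKEN=<token> ./lava-v2-submit.py \
#       -t kvm-tests -j my-jenkins-job \
#       -k /storage/kernel/vmlinuz \
#       -km /storage/kernel/linux-modules.tar.gz \
#       -lm /storage/kernel/lttng-modules.tar.gz \
#       -tc <lttng-tools commit sha1> -uc <lttng-ust commit sha1>
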
class TestType():
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
    values = {
        'baremetal-benchmarks': baremetal_benchmarks,
        'baremetal-tests': baremetal_tests,
        'kvm-tests': kvm_tests,
        'kvm-fuzzing-tests': kvm_fuzzing_tests,
    }

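# The keys of TestType.values are the strings accepted by the -t/--type
# argument; for example, TestType.values['kvm-tests'] == TestType.kvm_tests.
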
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle:', f.faultString)
        raise

    return json.loads(bundle['content'])

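# Illustrative sketch (an assumption, inferred from the accesses made by the
# helpers below) of the decoded bundle content returned by
# get_job_bundle_content():
#
#     {
#         "test_runs": [
#             {
#                 "test_id": "...",
#                 "attachments": [
#                     {"pathname": "stdout.log", "content": "<base64>"}
#                 ],
#                 "test_results": [
#                     {
#                         "test_case_id": "...",
#                         "result": "pass",
#                         "attachments": [...]
#                     }
#                 ]
#             }
#         ]
#     }
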
# Parse the results bundle to check whether the run-tests test case of the
# lttng-kernel-tests test suite passed.
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly, but
    # this doesn't affect the behaviour of the tests. We should update our
    # LAVA installation and try to reproduce the failures. They were
    # encountered on Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    # Ignore the known-flaky boot-action tests.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and save them locally as CSV
# files.
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every test case of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark test cases.
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the result files.
                            if a['pathname'] in testcases:
                                # Decode the base64 representation of the
                                # result file and write it to a file in the
                                # current working directory.
                                with open(a['pathname'], 'wb') as f:
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the test case to fetch the stdout of the test
# suite.
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64-encoded log into text.
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8')

                    # Create an iterator over the lines so that the iteration
                    # state is kept across the two loops below.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing when we reach the end of
                                    # the section.
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break

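# The shared iterator above is the usual trick for resuming iteration inside
# a nested loop. A minimal sketch of the same pattern (illustrative only):
#
#     it = iter(['noise', 'START', 'a', 'b', 'END', 'noise'])
#     for line in it:
#         if line == 'START':
#             for line in it:   # resumes right after 'START'
#                 if line == 'END':
#                     break
#                 print(line)   # prints 'a' then 'b'
#             break
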
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 7200,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        # Pass -y so the unattended run never blocks on a prompt.
        'apt-get upgrade -y',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-ioctl.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/raw-syscall-getpid.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/perf-tests.yml'
                }
            ],
            'timeout': 3600
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/destructive-tests.yml'
                }
            ],
            'timeout': 7200
        }
    })
    return command

def get_kprobes_generate_data_cmd():
    random_seed = random.randint(0, 1000000)
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
                    'parameters': {'RANDOM_SEED': str(random_seed)}
                }
            ],
            'timeout': 60
        }
    })
    return command

def get_kprobes_test_cmd(round_nb):
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
                    'parameters': {'ROUND_NB': str(round_nb)}
                }
            ],
            'timeout': 1000
        }
    })
    return command

def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'pip3 install vlttng',
            ],
            'timeout': 3600
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --override projects.babeltrace.build-env.PYTHON=python3' \
                 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile babeltrace-python' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

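# For reference, the assembled command looks roughly like this (illustrative,
# with a hypothetical commit id and the kvm virtualenv path):
#
#     vlttng --jobs=$(nproc) --profile urcu-master ... \
#         --override projects.lttng-tools.checkout=1234abcd \
#         --profile lttng-tools-no-man-pages /root/virtenv
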
def main():
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1
    test_type = TestType.values[args.type]

    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_fuzzing_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_kprobes_generate_data_cmd())
        for i in range(10):
            j['actions'].append(get_kprobes_test_cmd(round_nb=i))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid: {}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Poll the status of the job every 30 seconds until it leaves the
    # 'Submitted' and 'Running' states.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    started_running = False
    while jobstatus in ('Submitted', 'Running'):
        if not started_running and jobstatus == 'Running':
            print('Job started running')
            started_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())