jjb: lava: Move kprobe test to different test shell
[lttng-ci.git] / scripts / lttng-baremetal-tests / lava-submit.py
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

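# LAVA master account used for XML-RPC calls and the SCP base path of the
# storage server holding the kernel and module artifacts.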
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

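# Type of test job this script can submit; selected with the -t/--type argument.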
class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3

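# Fetch the result bundle of a LAVA job over XML-RPC and return its decoded
# JSON content.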
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests test case
# of the lttng-kernel-tests suite passed successfully
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    # FIXME: These tests are part of the boot actions and fail randomly, but
    # this doesn't affect the behaviour of the tests. We should update our
    # LAVA installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every test case of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark test cases
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file
                                    # in the current working directory
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the test case to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 file and split on newlines to get
                    # a list of lines
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Create an iterator on the lines to keep the iteration
                    # state across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            # Print until we reach the end of the section
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    break

                    if found is True:
                        print('----- TEST SUITE OUTPUT END -----')
                        break

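# Create the skeleton of the LAVA job definition; x86 jobs also get the
# 'dev-sda1' device tag.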
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 18000,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

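# Return the action booting the deployed kernel image.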
def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

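# Return the action configuring the target: DNS setup, creation of the
# 'tracing' group and installation of the build dependencies.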
def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade -y',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

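# Return the lava_test_shell action running the syscall benchmark test
# definitions from the lttng-ci repository.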
def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

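# Return the lava_test_shell action running the perf tests on bare metal.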
def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

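# Return the lava_test_shell action running the kernel and destructive tests
# in the KVM image.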
def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command
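
# Return the lava_test_shell action running the kprobe fuzzing tests.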
def get_kprobes_test_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kprobe-fuzzing-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

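# Return the action submitting the results to the given anonymous stream on
# the LAVA master.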
def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

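# Return the deploy_kernel action for KVM jobs: the kernel image plus the
# linux and lttng module archives unpacked into the root filesystem.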
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

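# Return the deploy_kernel action for bare-metal x86 jobs: the kernel image,
# the module overlays and an NFS root filesystem.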
def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

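# Return the action building the LTTng environment on the target with vlttng,
# at the requested lttng-tools (and optionally lttng-ust) commits.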
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --override projects.babeltrace.build-env.PYTHON=python3' \
                 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile babeltrace-python' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal tests using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('Argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_kprobes_test_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('LAVA job id: {}'.format(jobid))
    print('LAVA job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    started_running = False
    while jobstatus in ('Submitted', 'Running'):
        if not started_running and jobstatus == 'Running':
            print('Job started running')
            started_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed LAVA test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())