jjb: Build the Babeltrace Python bindings in Lava jobs
scripts/lttng-baremetal-tests/lava-submit.py
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

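# This script builds a LAVA job definition (older JSON/XML-RPC job format),
# submits it over XML-RPC using USERNAME and the LAVA_JENKINS_TOKEN
# environment variable, then polls the job and reports its results.
#
# Illustrative invocation only -- the option values below are placeholders
# (the kernel and module paths are appended to SCP_PATH, i.e. they are paths
# on the storage server); the real values are passed in by the Jenkins job:
#
#   LAVA_JENKINS_TOKEN=<token> ./lava-submit.py \
#       -t kvm-tests \
#       -j my-jenkins-job \
#       -k /path/to/vmlinuz \
#       -km /path/to/linux-modules.tar.gz \
#       -lm /path/to/lttng-modules.tar.gz \
#       -tc <lttng-tools commit id> \
#       -uc <lttng-ust commit id>
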
import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

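# Test type selected through the -t/--type command-line argument parsed in
# main() ('baremetal-benchmarks', 'baremetal-tests' or 'kvm-tests').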
class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3

def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise
    return json.loads(bundle['content'])

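# For reference, the decoded bundle content consumed by the functions below is
# a dictionary that looks roughly like this (illustrative sketch only, limited
# to the fields this script actually reads):
#
#   {
#       'test_runs': [
#           {
#               'test_id': 'lttng-kernel-test',
#               'test_results': [
#                   {'test_case_id': 'run-tests', 'result': 'pass', ...},
#               ],
#               'attachments': [
#                   {'pathname': 'stdout.log', 'content': '<base64 data>'},
#               ],
#           },
#       ],
#   }
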
# Parse the results bundle to check whether the run-tests testcase of the
# lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    # FIXME: These testcases are part of the boot actions and fail randomly,
    # but they don't affect the behaviour of the tests themselves. We should
    # update our LAVA installation and try to reproduce the issue. This error
    # was encountered on Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] in 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and save them locally as CSV
# files.
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over it to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files.
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file in
                                    # the current working directory.
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite.
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 file and split on newlines to get a
                    # list of lines.
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Use an iterator over the lines so the iteration state is
                    # kept across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the testcase and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing when we reach the end of
                                    # the section.
                                    break

                    if found is True:
                        print('----- TEST SUITE OUTPUT END -----')
                        break

def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 18000,
        'actions': []
    })
    if build_device in 'x86':
        job['tags'].append('dev-sda1')

    return job
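
# For illustration only: create_new_job('foo', build_device='x86') returns an
# OrderedDict looking roughly like the following, which the caller then
# extends through its 'actions' list:
#
#   {
#       'health_check': False,
#       'job_name': 'foo',
#       'device_type': 'x86',
#       'tags': ['dev-sda1'],
#       'timeout': 18000,
#       'actions': []
#   }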

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device in 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command
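
# For example, get_results_cmd(stream_name='benchmark-kernel') sets 'stream'
# to '/anonymous/benchmark-kernel/', the bundle stream the results are
# submitted to on the LAVA server.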

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command
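
# Note: the kernel, module overlays and nfs rootfs locations are all scp://
# URLs built by prefixing SCP_PATH. For example (path purely illustrative),
# kernel_path = '/storage/jenkins-lava/kernel/vmlinuz' yields
# 'scp://jenkins-lava@storage.internal.efficios.com/storage/jenkins-lava/kernel/vmlinuz'.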


def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --override projects.babeltrace.build-env.PYTHON=python3' \
                 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile babeltrace-python' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    virtenv_path = None
    if build_device in 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command
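
# For a rough idea of the assembled command, with both commits set it expands
# to something like the following (a single line in practice; commit ids are
# placeholders, and the trailing path is /root/virtenv on kvm or /tmp/virtenv
# otherwise):
#
#   vlttng --jobs=$(nproc) --profile urcu-master
#          --override projects.babeltrace.build-env.PYTHON=python3
#          --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config
#          --profile babeltrace-stable-1.4
#          --profile babeltrace-python
#          --profile lttng-tools-master
#          --override projects.lttng-tools.checkout=<lttng-tools commit>
#          --profile lttng-tools-no-man-pages
#          --profile lttng-ust-master
#          --override projects.lttng-ust.checkout=<lttng-ust commit>
#          --profile lttng-ust-no-man-pages
#          /root/virtenv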

def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal or KVM tests using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type in 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type in 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type in 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('Argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except Exception as e:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

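    # At this point the job's 'actions' list is, in order: deploy_kernel,
    # boot_image, lava_command_run (base system configuration),
    # lava_command_run (vlttng environment setup), lava_test_shell (the test
    # or benchmark definitions) and submit_results.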
    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in 'Submitted' or jobstatus in 'Running':
        if not_running is False and jobstatus in 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus not in 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1


if __name__ == "__main__":
    sys.exit(main())