#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3

def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        # Re-raise after logging: without it, 'bundle' would be unbound below.
        print('Error while fetching results bundle', f.faultString)
        raise

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests test case
# of the lttng-kernel-tests passed successfully
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] == 'wait_for_test_image_prompt':
                    # FIXME: This test is part of the boot action and fails
                    # randomly but doesn't affect the behaviour of the tests.
                    # No reply on the Lava IRC channel yet. We should update
                    # our Lava installation and try to reproduce it. This error
                    # was encountered on the KVM trusty image only. Not seen
                    # on Xenial at this point.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

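# A minimal sketch of the bundle layout assumed by the parsers in this file.
# The field names match what the code reads; the values are made-up
# placeholders, not real LAVA output:
#
# {
#     'test_runs': [
#         {
#             'test_id': 'lttng-kernel-test',
#             'test_results': [
#                 {'test_case_id': 'run-tests', 'result': 'pass'}
#             ],
#             'attachments': [
#                 {'pathname': 'stdout.log', 'content': '<base64 data>'}
#             ]
#         }
#     ]
# }
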
# Get the benchmark results from the LAVA bundle
# and save them as CSV files locally
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_open_efault.csv']

    # The result bundle is a large JSON containing the results of every test
    # case of the LAVA job as well as the files that were attached during the
    # run. We need to iterate over this JSON to get the base64 representation
    # of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark test cases
        if 'benchmark-syscall-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files
                            if a['pathname'] in testcases:
                                # Decode the base64 result file and write it
                                # to a file in the current working directory
                                with open(a['pathname'], 'wb') as f:
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the test case to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 file and split on newlines to get
                    # a list of lines
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Create an iterator over the lines, keeping the state
                    # of the iterator across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            # Print until we reach the end of the section
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    break

                    if found is True:
                        print('----- TEST SUITE OUTPUT END -----')
                        break

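# A minimal, standalone sketch of the shared-iterator pattern used above:
# both loops consume the same iterator, so when the inner loop stops at the
# end marker the outer loop resumes after it instead of rescanning from the
# start. The function name and markers here are illustrative only.
def _extract_section_sketch(lines, start_marker, end_marker):
    section = []
    lines_iter = iter(lines)
    for line in lines_iter:
        if start_marker in line:
            # The inner loop advances the same iterator as the outer loop.
            for line in lines_iter:
                if end_marker in line:
                    return section
                section.append(line)
    return section
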
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 18000,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command

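# For illustration, a deploy command built by get_deploy_cmd_x86() ends up
# shaped roughly like this (the paths are made-up placeholders):
#
# {
#     'command': 'deploy_kernel',
#     'metadata': {'jenkins_jobname': 'my-job'},
#     'parameters': {
#         'overlays': ['scp://jenkins-lava@storage.internal.efficios.com/path/to/modules.tar.gz', ...],
#         'kernel': 'scp://jenkins-lava@storage.internal.efficios.com/path/to/vmlinuz',
#         'nfsrootfs': '...',
#         'target_type': 'ubuntu'
#     }
# }
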
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

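# For reference, the assembled vlttng command looks roughly like the
# following (commit ids and the virtenv path are placeholders):
#
#   vlttng --jobs=$(nproc) --profile urcu-master \
#       --profile babeltrace-stable-1.4 \
#       --profile lttng-tools-master \
#       --override projects.lttng-tools.checkout=<tools-commit> \
#       --profile lttng-tools-no-man-pages /root/virtenv
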
def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal tests using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

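    # Example invocation (paths, commit ids and the token are placeholders):
    #   LAVA_JENKINS_TOKEN=<token> ./lava-submit.py -t kvm-tests -j my-job \
    #       -k /path/to/vmlinuz -km /path/to/linux-modules.tar.gz \
    #       -lm /path/to/lttng-modules.tar.gz -tc <sha1> -uc <sha1>
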
    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('LAVA jobid: {}'.format(jobid))
    print('LAVA job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    running = False
    while jobstatus == 'Submitted' or jobstatus == 'Running':
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed LAVA test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())