jjb: Pass the events to enable in lava benchmarks
lttng-ci.git: scripts/lttng-baremetal-tests/lava-submit.py
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

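# This script assembles a LAVA job definition (JSON) from its command-line
# arguments, submits it to the LAVA master over XML-RPC, polls the scheduler
# until the job finishes, then fetches and reports the results.
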
import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

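# LAVA master coordinates and the scp base URL on the storage server from
# which the kernel and module artifacts referenced by the job are fetched.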
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3

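# Fetch the results bundle of the given job from the LAVA dashboard and
# return it as a parsed JSON document.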
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests test case of the
# lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] == 'wait_for_test_image_prompt':
                    # FIXME: This test is part of the boot action and fails
                    # randomly but doesn't affect the behaviour of the tests.
                    # No reply on the LAVA IRC channel yet. We should update
                    # our LAVA installation and try to reproduce it. This
                    # error was encountered on the KVM trusty image only; it
                    # has not been seen on Xenial at this point.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)
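
# For reference, the parsers in this file assume a results bundle layout
# along these lines (a sketch of the relevant fields only, not the full
# LAVA schema):
#
# {
#   "test_runs": [
#     {
#       "test_id": "...",
#       "test_results": [
#         {"test_case_id": "...", "result": "pass", "attachments": [...]}
#       ],
#       "attachments": [{"pathname": "stdout.log", "content": "<base64>"}]
#     }
#   ]
# }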

# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally.
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_dup_close.csv']

    # The result bundle is a large JSON document containing the results of
    # every test case of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark test cases.
        if 'benchmark-syscall-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files.
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file in
                                    # the current working directory.
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the test case to fetch the stdout of the test
# suite.
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 file and split on newlines to
                    # iterate over the lines.
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Create an iterator over the lines so that its position
                    # is kept across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing once we reach the end of
                                    # the section.
                                    break

                    if found:
                        print('----- TEST SUITE OUTPUT END -----')
                        break

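# Build the skeleton of the LAVA job; the 'actions' list is filled in by the
# caller. On x86, the 'dev-sda1' tag presumably restricts scheduling to
# devices that expose the /dev/sda1 scratch partition mounted by the config
# step below.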
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 18000,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

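# First-boot configuration: point the target at the internal DNS server,
# wipe the x86 scratch partition, then install the packages needed to build
# and run the test environment.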
def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ],
            'timeout': 300
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

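# The three helpers below build 'lava_test_shell' actions; each action points
# the device at test definitions (YAML files) hosted in the lttng-ci
# repository.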
def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

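# Ask the device to submit its results bundle to an anonymous stream on the
# LAVA dashboard; the parsing functions above fetch the bundle from there.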
def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

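# The deploy actions below pass the scp URLs of the kernel image and of the
# Linux and LTTng module archives to LAVA so that it can place them on the
# target root filesystem before boot.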
def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command


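# Prepare the test environment on the booted target: clone the benchmark
# suite and build the requested lttng-tools (and optionally lttng-ust)
# commits in a virtual environment using vlttng.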
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

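# Example invocation (the paths and commit hashes are hypothetical, shown
# for illustration only):
#
#   LAVA_JENKINS_TOKEN=<token> ./lava-submit.py \
#       -t kvm-tests -j my-kvm-job \
#       -k /path/to/vmlinuz \
#       -km /path/to/linux-modules.tar.gz \
#       -lm /path/to/lttng-modules.tar.gz \
#       -tc 1a2b3c4d \
#       -uc 5e6f7a8b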
def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal tests using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('Argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...', e)
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs require the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs require the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('LAVA job id: {}'.format(jobid))
    print('LAVA job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    running = False
    while jobstatus in ('Submitted', 'Running'):
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed LAVA test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())