Lava: Install libnuma to build benchmarks
scripts/lttng-baremetal-tests/lava-submit.py (lttng-ci.git)
#!/usr/bin/python3
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import sys
import time
import xmlrpc.client
from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3

def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # Without a bundle there is nothing to parse; re-raise instead of
        # falling through to a NameError on the return below.
        raise

    return json.loads(bundle['content'])

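# A results bundle, as consumed by the helpers below, is a JSON document
# shaped roughly like this sketch. Only the fields this script actually reads
# are shown, and the values are illustrative:
#
#   {
#     "test_runs": [
#       {
#         "test_id": "lttng-kernel-test",
#         "test_results": [
#           {"test_case_id": "run-tests", "result": "pass"},
#           {"result": "fail"}
#         ],
#         "attachments": [
#           {"pathname": "stdout.log", "content": "<base64 data>"}
#         ]
#       }
#     ]
#   }
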
# Parse the results bundle to check whether the run-tests testcase
# of the lttng-kernel-tests passed successfully
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] == 'wait_for_test_image_prompt':
                    # FIXME: This test is part of the boot action and fails
                    # randomly but doesn't affect the behaviour of the tests.
                    # No reply on the Lava IRC channel yet. We should update
                    # our Lava installation and try to reproduce it. This error
                    # was encountered on the KVM trusty image only. Not seen
                    # on Xenial at this point.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and
# save them as CSV files locally
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_open_efault.csv']

    # The result bundle is a large JSON document containing the results of
    # every testcase of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases
        if 'benchmark-syscall-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the results files
                            if a['pathname'] in testcases:
                                # Decode the base64 representation of the
                                # result file and write it to a file in the
                                # current working directory
                                with open(a['pathname'], 'wb') as f:
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64 file and split on newlines so we can
                    # iterate over the lines
                    testoutput = base64.b64decode(attachment['content']).decode('utf-8').split('\n')

                    # Create a generator so the state of the iterator is kept
                    # across the two loops.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            # Print until we reach the end of the section
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    break

                    if found:
                        print('----- TEST SUITE OUTPUT END -----')
                        break

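# For reference, the LAVA signals matched above show up in stdout.log roughly
# like this (the exact framing around the markers is illustrative):
#
#   <LAVA_SIGNAL_STARTTC run-tests>
#   ... test suite output printed between the two markers ...
#   <LAVA_SIGNAL_ENDTC run-tests>
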
def create_new_job(name, build_device):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        'timeout': 18000,
        'actions': []
    })
    if build_device == 'x86':
        job['tags'].append('dev-sda1')

    return job

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

def get_config_cmd(build_device):
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'groupadd tracing'
            ]
        }
    })
    if build_device == 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        # Run non-interactively; without -y, upgrade would wait for a
        # confirmation that never comes.
        'apt-get upgrade -y',
        'apt-get install -y {}'.format(' '.join(packages))
    ])
    return command

def get_baremetal_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_baremetal_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/perf-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_kvm_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/destructive-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

def get_deploy_cmd_kvm(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + linux_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command

def get_deploy_cmd_x86(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command


def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                'hash -r',
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'
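
    # For reference, the command assembled above looks roughly like this (the
    # commit id and the virtualenv path appended further down are
    # illustrative):
    #
    #   vlttng --jobs=$(nproc) --profile urcu-master \
    #       --profile babeltrace-stable-1.4 --profile lttng-tools-master \
    #       --override projects.lttng-tools.checkout=<sha1> \
    #       --profile lttng-tools-no-man-pages /root/virtenv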

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if build_device == 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command

def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using LAVA')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()
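
    # Example invocation, as a sketch (every value is illustrative; the kernel
    # and module arguments are paths on the storage server, resolved against
    # SCP_PATH above):
    #
    #   LAVA_JENKINS_TOKEN=<api-token> ./lava-submit.py \
    #       -t kvm-tests -j my-jenkins-job \
    #       -k /kernel/vmlinuz -km /modules/linux-modules.tar.gz \
    #       -lm /modules/lttng-modules.tar.gz \
    #       -tc <lttng-tools-sha1> -uc <lttng-ust-sha1>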

    if args.type == 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type == 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type == 'kvm-tests':
        test_type = TestType.kvm_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    lava_api_key = None
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError:
        print('LAVA_JENKINS_TOKEN not found in the environment. Exiting...')
        return -1

    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    elif test_type is TestType.kvm_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.kmodule, args.lmodule))

    j['actions'].append(get_boot_cmd())

    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Test runs need the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('LAVA jobid: {}'.format(jobid))

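    # scheduler.job_status() returns a dict; its 'job_status' field moves
    # through states such as 'Submitted', 'Running' and 'Complete' (this set
    # of states is inferred from the checks below).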
    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    running = False
    while jobstatus in ('Submitted', 'Running'):
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed LAVA test cases.'.format(passed, failed))

        if failed == 0:
            return 0
        else:
            return -1

if __name__ == "__main__":
    sys.exit(main())