Add Jenkins and job results parsing scripts
lttng-ci.git: scripts/lttng-baremetal-tests/lava-submit.py
#!/usr/bin/python
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

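# This script builds a LAVA JSON job definition that runs either the LTTng
# benchmarks or the LTTng kernel test suite on a baremetal device, submits it
# to the LAVA master over XML-RPC, polls the scheduler until the job finishes,
# then parses the result bundle to report passed/failed test cases.
#
# Example invocation (all values below are illustrative placeholders):
#   python lava-submit.py -t tests -j my-jenkins-job \
#       -k path/to/vmlinuz -km path/to/linux-modules.tar.gz \
#       -lm path/to/lttng-modules.tar.gz -l LAVA_API_KEY \
#       -tc LTTNG_TOOLS_COMMIT -uc LTTNG_UST_COMMIT
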
import argparse
import base64
import json
import os
import sys
import time
import xmlrpclib
from collections import OrderedDict
from enum import Enum

USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'

class TestType(Enum):
    benchmarks = 1
    tests = 2

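# Fetch the result bundle of a finished job from the LAVA dashboard and
# return its content as a parsed JSON document.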
def get_job_bundle_content(server, job):
    bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
    bundle = server.dashboard.get(bundle_sha)

    return json.loads(bundle['content'])

# Parse the result bundle and count the passed and failed test cases across
# all test runs (e.g. the run-tests test case of the lttng-kernel-tests suite)
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] == 'pass':
                    passed_tests += 1
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Parse the attachments of the test case to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] == 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] == 'stdout.log':

                    # Decode the base64-encoded attachment and split it on
                    # newlines to get a list of lines
                    testoutput = base64.b64decode(attachment['content']).split('\n')

                    # Create an iterator over the lines so its state is kept
                    # across the two loops below.
                    testoutput_iter = iter(testoutput)
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            found = True
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop printing when we reach the end of
                                    # the section
                                    break

                        if found is True:
                            print('----- TEST SUITE OUTPUT END -----')
                            break

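# Return the top-level skeleton of the LAVA JSON job definition; the
# individual actions are appended to the 'actions' list by the caller.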
def create_new_job(name):
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': 'x86',
        'tags': ['dev-sda1'],
        'timeout': 18000,
        'actions': []
    })
    return job

def get_boot_cmd():
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command

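# Return a lava_command_run action that configures the device: bring up
# networking, mount the scratch disk on /tmp and install the packages needed
# by the test suites.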
def get_config_cmd():
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'python3-pandas',
                'python3-numpy']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'ifup eth0',
                'route -n',
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                'mount /dev/sda1 /tmp',
                'rm -rf /tmp/*',
                'depmod -a',
                'locale-gen en_US.UTF-8',
                'apt-get update',
                'apt-get install -y {}'.format(' '.join(packages)),
            ]
        }
    })
    return command

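# Return a lava_test_shell action that runs the benchmark test definitions
# (failing close/open system calls) from the lttng-ci repository.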
def get_benchmarks_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-close.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                },
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

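# Return a lava_test_shell action that runs the LTTng kernel test suite
# test definition from the lttng-ci repository.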
def get_tests_cmd():
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/baremetal-tests/kernel-tests.yml'
                }
            ],
            'timeout': 18000
        }
    })
    return command

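# Return a submit_results action that uploads the result bundle to the given
# anonymous stream on the LAVA master.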
def get_results_cmd(stream_name):
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command

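# Return a deploy_kernel action pointing to the kernel image, the linux and
# lttng modules overlays and the NFS root filesystem, all fetched from the
# storage server over SCP.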
def get_deploy_cmd(jenkins_job, kernel_path, linux_modules_path, lttng_modules_path, nb_iter=None):
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + linux_modules_path))
    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command


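# Return a lava_command_run action that clones the benchmark scripts and
# builds the LTTng environment in /tmp/virtenv with vlttng, checking out the
# requested lttng-tools (and optionally lttng-ust) commits.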
def get_env_setup_cmd(lttng_tools_commit, lttng_ust_commit=None):
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'git clone https://github.com/frdeso/syscall-bench-it.git bm',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=16 --profile urcu-master' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    vlttng_cmd += ' /tmp/virtenv'

    command['parameters']['commands'].append(vlttng_cmd)
    return command

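# Parse the command line arguments, build the LAVA job for the requested test
# type, submit it and poll the scheduler until completion, then report the
# test case results (and the test suite output for 'tests' jobs).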
def main():
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-l', '--lava-key', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    j = create_new_job(args.jobname)
    j['actions'].append(get_deploy_cmd(args.jobname, args.kernel, args.kmodule, args.lmodule))
    j['actions'].append(get_boot_cmd())
    j['actions'].append(get_config_cmd())

    if args.type == 'benchmarks':
        test_type = TestType.benchmarks
    elif args.type == 'tests':
        test_type = TestType.tests
    else:
        print('Unrecognized test type "{}" for -t/--type. Exiting...'.format(args.type))
        return -1

    if test_type is TestType.benchmarks:
        j['actions'].append(get_env_setup_cmd(args.tools_commit))
        j['actions'].append(get_benchmarks_cmd())
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.tests:
        if args.ust_commit is None:
            print('Test runs require the -uc/--ust-commit option. Exiting...')
            return -1
        j['actions'].append(get_env_setup_cmd(args.tools_commit, args.ust_commit))
        j['actions'].append(get_tests_cmd())
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    while jobstatus in ('Submitted', 'Running'):
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1

    passed, failed = check_job_all_test_cases_state_count(server, jobid)

    print('The job produced {} passed and {} failed test cases.'.format(passed, failed))

    if test_type is TestType.tests:
        print_test_output(server, jobid)

    if failed == 0:
        return 0
    else:
        return -1

if __name__ == "__main__":
    sys.exit(main())