Rename lava-v2-submit to lava2-submit
[lttng-ci.git] / scripts / system-tests / lava2-submit.py
1 #!/usr/bin/python
2 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
3 #
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import argparse
18 import base64
19 import json
20 import os
21 import random
22 import sys
23 import time
24 import yaml
25 import xmlrpc.client
26 from collections import OrderedDict
27 from enum import Enum
28 from jinja2 import Environment, FileSystemLoader, meta
29
30 USERNAME = 'frdeso'
31 HOSTNAME = 'lava-master.internal.efficios.com'
32 SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
33
class TestType():
    """Test types supported by this script.

    The `values` map translates the test-type names accepted on the
    command line (-t/--type) to their numeric identifiers.
    """
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
    values = {
        'baremetal-benchmarks' : baremetal_benchmarks,
        'baremetal-tests' : baremetal_tests,
        'kvm-tests' : kvm_tests,
        # Historical typo ('fuzzin') kept so existing Jenkins jobs keep
        # working; the correctly spelled name is accepted as well.
        'kvm-fuzzin-tests' : kvm_fuzzing_tests,
        'kvm-fuzzing-tests' : kvm_fuzzing_tests,
    }
45
class DeviceType():
    """Names of the LAVA device types a job can be scheduled on."""
    x86 = 'x86'
    kvm = 'kvm'
    # Lookup table mapping the command-line name to the device identifier.
    values = {'x86': x86, 'kvm': kvm}
53
def get_packages():
    """Return the list of Debian packages to install on the test image."""
    return [
        'bsdtar',
        'psmisc',
        'wget',
        'python3',
        'python3-pip',
        'libglib2.0-dev',
        'libffi-dev',
        'elfutils',
        'libdw-dev',
        'libelf-dev',
        'libmount-dev',
        'libxml2',
        'libpfm4-dev',
        'libnuma-dev',
        'python3-dev',
        'swig',
        'stress',
    ]
59
def get_job_bundle_content(server, job):
    """Fetch and deserialize the results bundle of a LAVA job.

    :param server: XML-RPC proxy connected to the LAVA master.
    :param job: LAVA job id.
    :return: the bundle content parsed from its JSON representation.
    :raises xmlrpc.client.Fault: re-raised after printing the fault
        message when the RPC calls fail.
    """
    try:
        job_status = server.scheduler.job_status(str(job))
        bundle = server.dashboard.get(job_status['bundle_sha1'])
    except xmlrpc.client.Fault as fault:
        print('Error while fetching results bundle', fault.faultString)
        raise fault

    return json.loads(bundle['content'])
69
# Parse the results bundle to see the run-tests testcase
# of the lttng-kernel-tests passed successfully
def check_job_all_test_cases_state_count(server, job):
    """Count passed and failed test cases of a LAVA job.

    Test cases that are known to fail spuriously during the boot actions
    are ignored entirely (neither passed nor failed).

    :param server: XML-RPC proxy connected to the LAVA master.
    :param job: LAVA job id.
    :return: (passed_count, failed_count) tuple.
    """
    content = get_job_bundle_content(server, job)

    # FIXME:Those tests are part of the boot actions and fail randomly but
    # doesn't affect the behaviour of the tests. We should update our Lava
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip',
                           'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                # Strict equality: the previous `result['result'] in 'pass'`
                # was a substring test that would also accept 'pa' or 's'.
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    # Known-flaky boot-action test case: ignore it.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)
93
# Get the benchmark results from the lava bundle
# save them as CSV files localy
def fetch_benchmark_results(server, job):
    """Save the benchmark result attachments of a LAVA job as CSV files.

    The files are written in the current working directory, named after
    their attachment pathnames.

    :param server: XML-RPC proxy connected to the LAVA master.
    :param job: LAVA job id.
    """
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON containing the results of every
    # testcase of the LAVA job as well as the files that were attached
    # during the run. Walk it to find the base64 representation of the
    # benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' not in run['test_id']:
            continue
        for result in run.get('test_results', []):
            for attachment in result.get('attachments', []):
                # We only save the known result files.
                if attachment['pathname'] not in testcases:
                    continue
                # Convert the b64 representation of the result file and
                # write it to a file in the current working directory.
                with open(attachment['pathname'], 'wb') as result_file:
                    result_file.write(base64.b64decode(attachment['content']))
124
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    """Print the run-tests test suite output of a LAVA job to stdout.

    The output is extracted from the base64-encoded 'stdout.log'
    attachment of the 'lttng-kernel-test' test run, between the
    LAVA_SIGNAL_STARTTC and LAVA_SIGNAL_ENDTC run-tests markers.

    :param server: XML-RPC proxy connected to the LAVA master.
    :param job: LAVA job id.
    """
    content = get_job_bundle_content(server, job)
    # NOTE(review): `found` is never updated nor read afterwards.
    found = False

    for run in content['test_runs']:
        # NOTE(review): `x in 'literal'` is a substring test, not an
        # equality test — it also matches any substring of the literal.
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # on list
                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))

                    # str() on the decoded bytes yields a repr containing
                    # literal '\n' sequences; turn them into real newlines.
                    testoutput = testoutput.replace('\\n', '\n')

                    # Create a generator to iterate on the lines and keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            # The inner loop resumes the shared iterator, so
                            # it starts right after the start marker.
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # section
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break
160
def get_vlttng_cmd(device, lttng_tools_commit, lttng_ust_commit=None):
    """Craft the vlttng command line that builds the tracing environment.

    :param device: target device type (a DeviceType value); selects the
        virtualenv path appended at the end of the command.
    :param lttng_tools_commit: lttng-tools commit id to check out.
    :param lttng_ust_commit: optional lttng-ust commit id; when given,
        the lttng-ust profiles are added to the command.
    :return: the complete vlttng command as a single string.
    """
    vlttng_cmd = ('vlttng --jobs=$(nproc) --profile urcu-master'
                  ' --override projects.babeltrace.build-env.PYTHON=python3'
                  ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config'
                  ' --profile babeltrace-stable-1.4'
                  ' --profile babeltrace-python'
                  ' --profile lttng-tools-master'
                  ' --override projects.lttng-tools.checkout=' + lttng_tools_commit +
                  ' --profile lttng-tools-no-man-pages')

    if lttng_ust_commit is not None:
        vlttng_cmd += (' --profile lttng-ust-master '
                       ' --override projects.lttng-ust.checkout=' + lttng_ust_commit +
                       ' --profile lttng-ust-no-man-pages')

    # Compare with `==`: the previous `is` identity test only worked
    # because CPython happens to intern these short string literals.
    if device == DeviceType.kvm:
        vlttng_path = '/root/virtenv'
    else:
        vlttng_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + vlttng_path

    return vlttng_cmd
185
def main():
    """Render a LAVA job from the Jinja template, submit it to the LAVA
    master and wait for its completion.

    :return: 0 on success, -1 on failure (unknown test type, missing API
        token, incomplete job or failed test cases).
    """
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    parser.add_argument('-d', '--debug', required=False, action='store_true')
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1

    lava_api_key = None
    if not args.debug:
        try:
            # The Jenkins job exports the LAVA API token in the environment.
            # Narrowed from `except Exception`: only a missing variable is
            # expected here.
            lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
        except KeyError as e:
            print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
            return -1

    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
    jinja_env = Environment(loader=jinja_loader, trim_blocks=True,
                            lstrip_blocks=True)
    jinja_template = jinja_env.get_template('template_lava_job.jinja2')
    template_source = jinja_env.loader.get_source(jinja_env, 'template_lava_job.jinja2')
    parsed_content = jinja_env.parse(template_source)
    # Variables the template expects; kept for debugging the render context.
    undef = meta.find_undeclared_variables(parsed_content)

    test_type = TestType.values[args.type]

    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
        device_type = DeviceType.x86
        vlttng_path = '/tmp/virtenv'
    else:
        device_type = DeviceType.kvm
        vlttng_path = '/root/virtenv'

    # Both device types currently boot the same NFS root filesystem.
    nfsrootfs = "/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz"

    vlttng_cmd = get_vlttng_cmd(device_type, args.tools_commit, args.ust_commit)

    context = dict()
    context['DeviceType'] = DeviceType
    context['TestType'] = TestType

    context['job_name'] = args.jobname
    context['test_type'] = test_type
    context['packages'] = get_packages()
    context['random_seed'] = random.randint(0, 1000000)
    context['device_type'] = device_type

    context['vlttng_cmd'] = vlttng_cmd
    context['vlttng_path'] = vlttng_path

    context['kernel_url'] = args.kernel
    context['nfsrootfs_url'] = nfsrootfs
    context['lttng_modules_url'] = args.lmodule
    context['linux_modules_url'] = args.kmodule

    context['kprobe_round_nb'] = 10

    # Render the job definition once; it is both printed for debugging and
    # submitted to the scheduler.
    rendered_job = jinja_template.render(context)

    print(context)
    print(rendered_job)

    if args.debug:
        return 0

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    # Bug fix: the previous code called submit_job(json.dumps(j)) where `j`
    # was never defined, raising a NameError. LAVA v2 accepts the rendered
    # job definition directly.
    jobid = server.scheduler.submit_job(rendered_job)

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    # Equality tests replace the previous substring (`in 'Submitted'`) tests.
    while jobstatus in ('Submitted', 'Running'):
        if not_running is False and jobstatus == 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type in (TestType.kvm_tests, TestType.baremetal_tests):
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1

    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    return -1
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    sys.exit(main())
This page took 0.04009 seconds and 5 git commands to generate.