Change lava api user
[lttng-ci.git] / scripts / system-tests / lava2-submit.py
1 #!/usr/bin/python
2 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
3 #
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import argparse
18 import base64
19 import json
20 import os
21 import random
22 import sys
23 import time
24 import yaml
25 import xmlrpc.client
26 import pprint
27
28 from jinja2 import Environment, FileSystemLoader, meta
29
# Credentials and host used to reach the LAVA master's XML-RPC endpoint
# (see the ServerProxy URL built in main()).
USERNAME = 'lava-jenkins'
HOSTNAME = 'lava-master-02.internal.efficios.com'
32
class TestType():
    """Enumeration of the test types supported by this script.

    The ``values`` mapping translates the ``-t/--type`` command-line
    argument into the corresponding numeric identifier.
    """
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
    values = {
        'baremetal-benchmarks': baremetal_benchmarks,
        'baremetal-tests': baremetal_tests,
        'kvm-tests': kvm_tests,
        # Historical misspelling kept for backward compatibility with
        # existing jobs that still pass '-t kvm-fuzzin-tests'.
        'kvm-fuzzin-tests': kvm_fuzzing_tests,
        'kvm-fuzzing-tests': kvm_fuzzing_tests,
    }
44
class DeviceType():
    """LAVA device types and the mapping from their string names."""
    x86 = 'x86'
    kvm = 'kvm'
    values = {'x86': x86, 'kvm': kvm}
52
def get_packages():
    """Return the list of Debian packages to install on the test image."""
    return [
        'bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
        'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
        'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
        'libnuma-dev', 'python3-dev', 'swig', 'stress',
    ]
58
def get_job_bundle_content(server, job):
    """Download and deserialize the results bundle of a LAVA job.

    :param server: XML-RPC proxy to the LAVA master.
    :param job: LAVA job id (converted to str for the RPC call).
    :return: the bundle content parsed from its JSON representation.
    :raises xmlrpc.client.Fault: when the bundle cannot be fetched.
    """
    try:
        sha1 = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(sha1)
    except xmlrpc.client.Fault as error:
        print('Error while fetching results bundle', error.faultString)
        raise error

    return json.loads(bundle['content'])
68
# Parse the results bundle to check whether the run-tests testcases
# of the lttng-kernel-tests passed successfully.
def check_job_all_test_cases_state_count(server, job):
    """Count the passed and failed testcases of a LAVA job.

    :param server: XML-RPC proxy to the LAVA master.
    :param job: LAVA job id.
    :return: ``(passed_tests, failed_tests)`` tuple.
    """
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our Lava
    # installation and try to reproduce it. This error was encountered on
    # Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip',
                           'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                # Exact comparison: the previous substring test
                # (result['result'] in 'pass') also matched '', 'a',
                # 'as', 'pa', ... and would miscount those as passes.
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    # Known-flaky boot-action tests: ignore their failures.
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)
92
# Get the benchmark results from the lava bundle and save them as CSV
# files in the current working directory.
def fetch_benchmark_results(server, job):
    """Save the benchmark result CSV files of a LAVA job locally.

    The result bundle is a large JSON containing the results of every
    testcase of the LAVA job as well as the files that were attached
    during the run as base64 blobs; every attachment of a benchmark
    testcase whose name matches one of the known result files is decoded
    and written to the current working directory.

    :param server: XML-RPC proxy to the LAVA master.
    :param job: LAVA job id.
    """
    content = get_job_bundle_content(server, job)
    result_filenames = [
        'processed_results_close.csv',
        'processed_results_ioctl.csv',
        'processed_results_open_efault.csv',
        'processed_results_open_enoent.csv',
        'processed_results_dup_close.csv',
        'processed_results_raw_syscall_getpid.csv',
        'processed_results_lttng_test_filter.csv',
    ]

    for run in content['test_runs']:
        # Only the benchmark testcases carry result files.
        if 'benchmark-' not in run['test_id']:
            continue
        for result in run.get('test_results', []):
            for attachment in result.get('attachments', []):
                # Only save the known results files.
                if attachment['pathname'] not in result_filenames:
                    continue
                # Decode the base64 payload and write it to a file of
                # the same name in the current working directory.
                with open(attachment['pathname'], 'wb') as out:
                    out.write(base64.b64decode(attachment['content']))
123
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    """Print the stdout of the 'run-tests' testcase of a LAVA job.

    Looks up the 'lttng-kernel-test' run in the job's results bundle,
    base64-decodes its 'stdout.log' attachment and prints every line
    between the LAVA_SIGNAL_STARTTC/ENDTC markers of 'run-tests'.

    :param server: XML-RPC proxy to the LAVA master.
    :param job: LAVA job id.
    """
    content = get_job_bundle_content(server, job)
    # NOTE(review): 'found' is assigned but never updated or read below;
    # it looks like dead code.
    found = False

    for run in content['test_runs']:
        # NOTE(review): these two 'in' checks are substring tests against
        # the literal names, not equality — presumably intentional here
        # since the real ids match exactly, but worth confirming.
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # on list
                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))

                    # str() of a bytes object keeps '\n' escaped; restore
                    # real newlines before splitting.
                    testoutput = testoutput.replace('\\n', '\n')

                    # Create a generator to iterate on the lines and keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            # The inner loop consumes the SAME iterator, so
                            # printing resumes right after the header line.
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # section
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break
159
def get_vlttng_cmd(device, lttng_tools_commit, lttng_ust_commit=None):
    """Build the vlttng command line that sets up the test virtualenv.

    :param device: a DeviceType value; kvm devices place the virtualenv
        under /root, the others under /tmp.
    :param lttng_tools_commit: lttng-tools commit id to check out.
    :param lttng_ust_commit: optional lttng-ust commit id; when given,
        the lttng-ust profiles are added to the command.
    :return: the full vlttng command as a single string.
    """
    parts = [
        'vlttng --jobs=$(nproc) --profile urcu-master',
        ' --override projects.babeltrace.build-env.PYTHON=python3',
        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config',
        ' --profile babeltrace-stable-1.4',
        ' --profile babeltrace-python',
        ' --profile lttng-tools-master',
        ' --override projects.lttng-tools.checkout=' + lttng_tools_commit,
        ' --profile lttng-tools-no-man-pages',
    ]

    if lttng_ust_commit is not None:
        parts += [
            # NOTE: the trailing space here reproduces the historical
            # double space in the generated command.
            ' --profile lttng-ust-master ',
            ' --override projects.lttng-ust.checkout=' + lttng_ust_commit,
            ' --profile lttng-ust-no-man-pages',
        ]

    venv_path = '/root/virtenv' if device is DeviceType.kvm else '/tmp/virtenv'
    parts.append(' ' + venv_path)

    return ''.join(parts)
184
def main():
    """Render a LAVA job from the Jinja2 template, submit it and wait.

    Parses the command line, builds the template context, renders and
    prints the job, then (unless --debug) submits it to the LAVA master
    and polls its status every 30 seconds until completion.

    :return: 0 when the job completes with no failed testcase, -1 on
        argument/environment errors or job failure.
    """
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    parser.add_argument('-d', '--debug', required=False, action='store_true')
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1

    # The API token is only needed when actually submitting the job.
    lava_api_key = None
    if not args.debug:
        try:
            lava_api_key = os.environ['LAVA2_JENKINS_TOKEN']
        except Exception as e:
            print('LAVA2_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
            return -1

    # Load the job template from the directory containing this script.
    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
    jinja_env = Environment(loader=jinja_loader, trim_blocks=True,
                            lstrip_blocks=True)
    jinja_template = jinja_env.get_template('template_lava_job.jinja2')

    test_type = TestType.values[args.type]

    # Baremetal jobs run on x86 devices, the others in KVM guests; the
    # virtualenv path must match the one chosen in get_vlttng_cmd().
    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
        device_type = DeviceType.x86
        vlttng_path = '/tmp/virtenv'
    else:
        device_type = DeviceType.kvm
        vlttng_path = '/root/virtenv'
    nfsrootfs = "/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz"

    vlttng_cmd = get_vlttng_cmd(device_type, args.tools_commit, args.ust_commit)

    context = dict()
    context['DeviceType'] = DeviceType
    context['TestType'] = TestType

    context['job_name'] = args.jobname
    context['test_type'] = test_type
    context['packages'] = get_packages()
    context['random_seed'] = random.randint(0, 1000000)
    context['device_type'] = device_type

    context['vlttng_cmd'] = vlttng_cmd
    context['vlttng_path'] = vlttng_path

    context['kernel_url'] = args.kernel
    context['nfsrootfs_url'] = nfsrootfs
    context['lttng_modules_url'] = args.lmodule
    context['linux_modules_url'] = args.kmodule

    context['kprobe_round_nb'] = 10

    render = jinja_template.render(context)

    print('Current context:')
    pprint.pprint(context, indent=4)
    print('Job to be submitted:')

    print(render)

    if args.debug:
        return 0

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(render)

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://{}/scheduler/job/{}/log_file'.format(HOSTNAME, jobid))

    # Check the status of the job every 30 seconds. Exact comparisons are
    # used here: the previous substring tests (jobstatus in 'Running')
    # would have matched any substring of the status names.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    running = False
    while jobstatus in ('Submitted', 'Running'):
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    # Result fetching is disabled for now; see print_test_output() and
    # fetch_benchmark_results() when re-enabling it.

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus != 'Complete':
        return -1

    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    return 0 if failed == 0 else -1
300
# Script entry point: propagate main()'s return code as the process
# exit status (0 on success, -1 on failure).
if __name__ == "__main__":
    sys.exit(main())
This page took 0.03735 seconds and 5 git commands to generate.