Fix: do not specify os type in deploy action
[lttng-ci.git] / scripts / system-tests / lava2-submit.py
#!/usr/bin/python
# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import base64
import json
import os
import random
import sys
import time
import yaml
import xmlrpc.client
import pprint

from jinja2 import Environment, FileSystemLoader, meta

USERNAME = 'lava-jenkins'
HOSTNAME = 'lava-master-02.internal.efficios.com'
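
# Test-type identifiers. The keys of 'values' are the strings accepted by the
# -t/--type command line argument; the matching identifier is passed to the
# Jinja2 job template through the rendering context.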
class TestType():
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
    values = {
        'baremetal-benchmarks' : baremetal_benchmarks,
        'baremetal-tests' : baremetal_tests,
        'kvm-tests' : kvm_tests,
        'kvm-fuzzing-tests' : kvm_fuzzing_tests,
    }
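
# LAVA device-type names: the bare-metal test types run on 'x86' devices, the
# KVM-based test types run on 'qemu' devices.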
class DeviceType():
    x86 = 'x86'
    kvm = 'qemu'
    values = {
        'kvm' : kvm,
        'x86' : x86,
    }
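
# Packages passed to the job template; they are presumably installed on the
# target before the test suite runs (build and runtime dependencies).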
def get_packages():
    return ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
            'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
            'libnuma-dev', 'python3-dev', 'swig', 'stress']
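
# Fetch the result bundle of a job over the LAVA XML-RPC API and return it as
# a parsed JSON document.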
def get_job_bundle_content(server, job):
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        raise f

    return json.loads(bundle['content'])

# Parse the results bundle to check whether the run-tests test case of the
# lttng-kernel-tests test suite passed successfully.
def check_job_all_test_cases_state_count(server, job):
    content = get_job_bundle_content(server, job)

    # FIXME: These tests are part of the boot actions and fail randomly, but
    # this does not affect the behaviour of the tests themselves. We should
    # update our LAVA installation and try to reproduce the failure. This
    # error was encountered on Ubuntu 16.04.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] in 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    pass
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)

# Get the benchmark results from the LAVA bundle and save them locally as CSV
# files.
def fetch_benchmark_results(server, job):
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON document containing the results of
    # every test case of the LAVA job as well as the files that were attached
    # during the run. We need to iterate over this JSON to get the base64
    # representation of the benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark test cases
        if 'benchmark-' in run['test_id']:
            if 'test_results' in run:
                for res in run['test_results']:
                    if 'attachments' in res:
                        for a in res['attachments']:
                            # We only save the result files
                            if a['pathname'] in testcases:
                                with open(a['pathname'], 'wb') as f:
                                    # Decode the base64 representation of the
                                    # result file and write it to a file in
                                    # the current working directory
                                    f.write(base64.b64decode(a['content']))

# Parse the attachments of the test case to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    found = False

    for run in content['test_runs']:
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':

                    # Decode the base64 file and split on newlines to iterate
                    # over the lines
                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))

                    testoutput = testoutput.replace('\\n', '\n')

                    # Create an iterator over the lines so that its state is
                    # kept across the two loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:

                        # Find the header of the test case and start printing
                        # from there
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Print until we reach the end of the
                                    # section
                                    break

                            print('----- TEST SUITE OUTPUT END -----')
                            break
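
# Build the vlttng command line that sets up the LTTng virtual environment on
# the target, pinning lttng-tools (and optionally lttng-ust) to the requested
# commits. The virtualenv path depends on the device type: /root/virtenv on
# KVM devices, /tmp/virtenv otherwise.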
def get_vlttng_cmd(device, lttng_tools_commit, lttng_ust_commit=None):

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
            ' --override projects.babeltrace.build-env.PYTHON=python3' \
            ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
            ' --profile babeltrace-stable-1.4' \
            ' --profile babeltrace-python' \
            ' --profile lttng-tools-master' \
            ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
            ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master' \
                ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                ' --profile lttng-ust-no-man-pages'

    if device == DeviceType.kvm:
        vlttng_path = '/root/virtenv'
    else:
        vlttng_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + vlttng_path

    return vlttng_cmd

def main():
    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz"
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    parser.add_argument('-d', '--debug', required=False, action='store_true')
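
    # Example invocation (hypothetical values), as Jenkins would run it:
    #   lava2-submit.py -t kvm-tests -j my-kvm-job -k <kernel_url> \
    #       -lm <modules_url> -tc <lttng_tools_sha> -uc <lttng_ust_sha>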
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1
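
    # The LAVA API token is taken from the LAVA2_JENKINS_TOKEN environment
    # variable; it is only required when the job is actually submitted, so it
    # is not read in --debug mode.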
    lava_api_key = None
    if not args.debug:
        try:
            lava_api_key = os.environ['LAVA2_JENKINS_TOKEN']
        except Exception as e:
            print('LAVA2_JENKINS_TOKEN not found in the environment. Exiting...', e)
            return -1
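
    # Load the LAVA job definition template from the directory containing this
    # script. The template's undeclared variables are also collected, although
    # the result is not used further in this script.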
    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
    jinja_env = Environment(loader=jinja_loader, trim_blocks=True,
                            lstrip_blocks=True)
    jinja_template = jinja_env.get_template('template_lava_job.jinja2')
    template_source = jinja_env.loader.get_source(jinja_env, 'template_lava_job.jinja2')
    parsed_content = jinja_env.parse(template_source)
    undef = meta.find_undeclared_variables(parsed_content)

    test_type = TestType.values[args.type]

    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
        device_type = DeviceType.x86
        vlttng_path = '/tmp/virtenv'
    else:
        device_type = DeviceType.kvm
        vlttng_path = '/root/virtenv'

    vlttng_cmd = get_vlttng_cmd(device_type, args.tools_commit, args.ust_commit)
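
    # Rendering context for the Jinja2 job template. The TestType and
    # DeviceType classes themselves are exposed, presumably so the template
    # can compare the selected values against their members.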
    context = dict()
    context['DeviceType'] = DeviceType
    context['TestType'] = TestType

    context['job_name'] = args.jobname
    context['test_type'] = test_type
    context['packages'] = get_packages()
    context['random_seed'] = random.randint(0, 1000000)
    context['device_type'] = device_type

    context['vlttng_cmd'] = vlttng_cmd
    context['vlttng_path'] = vlttng_path

    context['kernel_url'] = args.kernel
    context['nfsrootfs_url'] = nfsrootfs
    context['lttng_modules_url'] = args.lmodule

    context['kprobe_round_nb'] = 10

    render = jinja_template.render(context)

    print('Current context:')
    pprint.pprint(context, indent=4)
    print('Job to be submitted:')

    print(render)

    if args.debug:
        return 0

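    # Submit the rendered job definition to the LAVA master over XML-RPC,
    # authenticating with the Jenkins user and its API token.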
    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(render)

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master-02.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    started_running = False
    while jobstatus in 'Submitted' or jobstatus in 'Running':
        if not started_running and jobstatus in 'Running':
            print('Job started running')
            started_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    # Do not fetch result for now
    # if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
    #     print_test_output(server, jobid)
    # elif test_type is TestType.baremetal_benchmarks:
    #     fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus not in 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

        if failed == 0:
            return 0
        else:
            return -1

if __name__ == "__main__":
    sys.exit(main())