# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import os
import random
import re
import sys
import time
import xmlrpc.client
from urllib.parse import urljoin
from urllib.request import urlretrieve

from jinja2 import Environment, FileSystemLoader
# Credentials and endpoints for the LAVA master and the object store
# holding benchmark result artifacts.
USERNAME = 'lava-jenkins'
HOSTNAME = 'lava-master-02.internal.efficios.com'
OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
class TestType():
    """ Enum like for test type """
    # Integer identifiers for each supported test flavor.
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    # Maps the command-line '-t/--type' argument to its identifier.
    values = {
        'baremetal-benchmarks': baremetal_benchmarks,
        'baremetal-tests': baremetal_tests,
        'kvm-tests': kvm_tests,
    }
class DeviceType():
    """ Enum like for device type """
    # LAVA device-type names: physical x86 boards vs QEMU virtual machines.
    x86 = 'x86'
    kvm = 'qemu'
    # Maps the short test-category name to the LAVA device-type string.
    values = {'kvm': kvm, 'x86': x86}
def get_job_bundle_content(server, job):
    """
    Fetch and decode the results bundle of a LAVA job.

    :param server: XML-RPC proxy to the LAVA master.
    :param job: job identifier (converted to str for the RPC call).
    :return: the decoded JSON 'content' of the bundle.
    :raises xmlrpc.client.Fault: re-raised after logging when the
        bundle cannot be fetched.
    """
    # NOTE(review): uses the dashboard/bundle API — presumably a LAVA v1
    # leftover; confirm it is still reachable on this master.
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as error:
        print('Error while fetching results bundle', error.faultString)
        raise error

    return json.loads(bundle['content'])
def check_job_all_test_cases_state_count(server, job):
    """
    Parse the results bundle to see if the run-tests testcase
    of the lttng-kernel-tests passed successfully.

    :param server: XML-RPC proxy to the LAVA master.
    :param job: job identifier (converted to str for the RPC call).
    :return: tuple (passed_tests, failed_tests) counts.
    """
    print("Testcase result:")
    content = server.results.get_testjob_results_yaml(str(job))
    # unsafe_load: the YAML comes from our own LAVA master, not untrusted
    # input, but it can contain python-tagged objects a safe loader rejects.
    testcases = yaml.unsafe_load(content)

    passed_tests = 0
    failed_tests = 0
    for testcase in testcases:
        if testcase['result'] != 'pass':
            print(
                "\tFAILED {}\n\t\t See http://{}{}".format(
                    testcase['name'], HOSTNAME, testcase['url']
                )
            )
            failed_tests += 1
        else:
            passed_tests += 1
    return (passed_tests, failed_tests)
def fetch_benchmark_results(build_id):
    """
    Get the benchmark results from the objstore and
    save them as CSV files locally.

    :param build_id: Jenkins build id used as the objstore directory name.
    """
    # Fixed set of processed-results files produced by the benchmark jobs.
    testcases = [
        'processed_results_close.csv',
        'processed_results_ioctl.csv',
        'processed_results_open_efault.csv',
        'processed_results_open_enoent.csv',
        'processed_results_dup_close.csv',
        'processed_results_raw_syscall_getpid.csv',
        'processed_results_lttng_test_filter.csv',
    ]
    for testcase in testcases:
        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
        print('Fetching {}'.format(url))
        # Saved under the same filename in the current working directory.
        urlretrieve(url, testcase)
def print_test_output(server, job):
    """
    Parse the attachment of the testcase to fetch the stdout of the test suite.

    Prints every 'target'-level log line emitted between the
    LAVA_SIGNAL_STARTTC and LAVA_SIGNAL_ENDTC markers of 'run-tests'.

    :param server: XML-RPC proxy to the LAVA master.
    :param job: job identifier (converted to str for the RPC call).
    """
    job_finished, log = server.scheduler.jobs.logs(str(job))
    # unsafe_load: log stream comes from our own LAVA master and may carry
    # python-tagged YAML nodes.
    logs = yaml.unsafe_load(log.data.decode('ascii'))

    print_line = False
    for line in logs:
        if line['lvl'] != 'target':
            continue
        if line['msg'] == '<LAVA_SIGNAL_STARTTC run-tests>':
            print('---- TEST SUITE OUTPUT BEGIN ----')
            print_line = True
            continue
        if line['msg'] == '<LAVA_SIGNAL_ENDTC run-tests>':
            print('----- TEST SUITE OUTPUT END -----')
            print_line = False
            continue
        if print_line:
            print("{} {}".format(line['dt'], line['msg']))
def get_vlttng_cmd(
    lttng_version, lttng_tools_url, lttng_tools_commit, lttng_ust_url=None, lttng_ust_commit=None
):
    """
    Return vlttng cmd to be used in the job template for setup.

    :param lttng_version: 'master' or a 'stable-X.YY' branch name.
    :param lttng_tools_url: git URL of lttng-tools to build.
    :param lttng_tools_commit: lttng-tools commit/ref to check out.
    :param lttng_ust_url: optional git URL of lttng-ust.
    :param lttng_ust_commit: optional lttng-ust commit/ref; when None the
        lttng-ust overrides are omitted entirely.
    :return: the full vlttng command line, ending with the virtenv path.
    """

    vlttng_cmd = (
        'vlttng --jobs=$(nproc) --profile urcu-master'
        ' --override projects.babeltrace.build-env.PYTHON=python3'
        ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config'
        ' --profile babeltrace-stable-1.4'
        ' --profile babeltrace-python'
        ' --profile lttng-tools-master'
        ' --override projects.lttng-tools.source=' + lttng_tools_url
        + ' --override projects.lttng-tools.checkout=' + lttng_tools_commit
        + ' --profile lttng-tools-no-man-pages'
    )

    if lttng_ust_commit is not None:
        vlttng_cmd += (
            ' --profile lttng-ust-master '
            ' --override projects.lttng-ust.source=' + lttng_ust_url
            + ' --override projects.lttng-ust.checkout=' + lttng_ust_commit
            + ' --profile lttng-ust-no-man-pages'
        )

    # Get the major and minor version numbers from the lttng version string.
    # Raw string avoids the invalid-escape deprecation for '\d'.
    version_match = re.search(r'stable-(\d).(\d\d)', lttng_version)

    if version_match is not None:
        major_version = int(version_match.group(1))
        minor_version = int(version_match.group(2))
    else:
        # Setting to zero to make the comparison below easier.
        major_version = 0
        minor_version = 0

    # The SDT uprobe tests only exist starting from lttng 2.11 (and master).
    if lttng_version == 'master' or (major_version >= 2 and minor_version >= 11):
        vlttng_cmd += (
            ' --override projects.lttng-tools.configure+=--enable-test-sdt-uprobe'
        )

    vlttng_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + vlttng_path

    return vlttng_cmd
def main():
    """
    Parse the command line, render the LAVA job template, submit it to the
    LAVA master over XML-RPC, poll it to completion and report results.

    :return: 0 on success, -1 on any failure (unknown type, missing token,
        submission failure, unfinished job or failed test cases).
    """
    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_xenial_2018-12-05.tar.gz"

    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-lv', '--lttng-version', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tu', '--tools-url', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-id', '--build-id', required=True)
    parser.add_argument('-uu', '--ust-url', required=False)
    parser.add_argument('-uc', '--ust-commit', required=False)
    parser.add_argument('-d', '--debug', required=False, action='store_true')
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1

    # The API token is only needed for a real submission, not in debug mode.
    lava_api_key = None
    if not args.debug:
        try:
            lava_api_key = os.environ['LAVA2_JENKINS_TOKEN']
        except Exception as error:
            print(
                'LAVA2_JENKINS_TOKEN not found in the environment variable. Exiting...',
                error,
            )
            return -1

    # The job template lives next to this script.
    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
    jinja_env = Environment(loader=jinja_loader, trim_blocks=True, lstrip_blocks=True)
    jinja_template = jinja_env.get_template('template_lava_job.jinja2')

    test_type = TestType.values[args.type]

    # Baremetal flavors run on physical x86 boards; everything else in KVM.
    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
        device_type = DeviceType.x86
    else:
        device_type = DeviceType.kvm

    vlttng_path = '/tmp/virtenv'

    vlttng_cmd = get_vlttng_cmd(
        args.lttng_version, args.tools_url, args.tools_commit, args.ust_url, args.ust_commit
    )

    # Template rendering context.
    context = dict()
    context['DeviceType'] = DeviceType
    context['TestType'] = TestType

    context['job_name'] = args.jobname
    context['test_type'] = test_type
    context['random_seed'] = random.randint(0, 1000000)
    context['device_type'] = device_type

    context['vlttng_cmd'] = vlttng_cmd
    context['vlttng_path'] = vlttng_path

    context['kernel_url'] = args.kernel
    context['nfsrootfs_url'] = nfsrootfs
    context['lttng_modules_url'] = args.lmodule
    context['jenkins_build_id'] = args.build_id

    context['kprobe_round_nb'] = 10

    render = jinja_template.render(context)

    print('Job to be submitted:')
    print(render)

    # In debug mode we only want to see the rendered job, not submit it.
    if args.debug:
        return 0

    server = xmlrpc.client.ServerProxy(
        'http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME)
    )

    # Retry the submission a few times: the master occasionally answers
    # with transient protocol errors.
    jobid = None
    for attempt in range(10):
        try:
            jobid = server.scheduler.submit_job(render)
        except xmlrpc.client.ProtocolError as error:
            print(
                'Protocol error on submit, sleeping and retrying. Attempt #{}'.format(
                    attempt
                )
            )
            time.sleep(5)
        else:
            break
    # All attempts failed: bail out instead of crashing on an unbound jobid.
    if jobid is None:
        print('Job submission failed after 10 attempts. Exiting...')
        return -1

    print('Lava jobid:{}'.format(jobid))
    print(
        'Lava job URL: http://lava-master-02.internal.efficios.com/scheduler/job/{}'.format(
            jobid
        )
    )

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_state(jobid)['job_state']
    running = False
    while jobstatus in ['Submitted', 'Scheduling', 'Scheduled', 'Running']:
        if not running and jobstatus == 'Running':
            print('Job started running')
            running = True
        time.sleep(30)
        try:
            jobstatus = server.scheduler.job_state(jobid)['job_state']
        except xmlrpc.client.ProtocolError as error:
            print('Protocol error, retrying')
            continue
    print('Job ended with {} status.'.format(jobstatus))

    if jobstatus != 'Finished':
        return -1

    # Post-process per test flavor: dump the test-suite output or pull the
    # benchmark CSVs from the object store.
    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(args.build_id)

    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    return -1


if __name__ == "__main__":
    sys.exit(main())
# This page took 0.073003 seconds and 4 git commands to generate.