# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import base64
import json
import os
import pprint
import random
import sys
import time
import xmlrpc.client

from jinja2 import Environment, FileSystemLoader, meta
# Credentials and host used to reach the LAVA master's XML-RPC endpoint.
USERNAME = 'lava-jenkins'
HOSTNAME = 'lava-master-02.internal.efficios.com'
class TestType():
    """Enumeration of the supported test types.

    `values` maps the command-line names (-t/--type) to the enum values.
    NOTE(review): the class header and sibling member lines were lost in
    extraction; members 2-4 reconstructed from the `values` mapping below.
    """
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
    values = {
        'baremetal-benchmarks': baremetal_benchmarks,
        'baremetal-tests': baremetal_tests,
        'kvm-tests': kvm_tests,
        # Keep the historical (misspelled) CLI name for compatibility.
        'kvm-fuzzin-tests': kvm_fuzzing_tests,
    }
def get_job_bundle_content(server, job):
    """Fetch and decode the results bundle of a finished LAVA job.

    :param server: xmlrpc.client.ServerProxy connected to the LAVA master.
    :param job: LAVA job id (anything str()-able).
    :returns: the bundle content deserialized from JSON (a dict).
    :raises xmlrpc.client.Fault: if the scheduler/dashboard call fails.
    """
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # Re-raise: swallowing the fault would leave `bundle` unbound and
        # crash with a confusing NameError on the return below.
        raise f

    return json.loads(bundle['content'])
# Parse the results bundle to see the run-tests testcase
# of the lttng-kernel-tests passed successfully
def check_job_all_test_cases_state_count(server, job):
    """Count passed and failed Lava test cases of a job.

    :param server: xmlrpc.client.ServerProxy connected to the LAVA master.
    :param job: LAVA job id.
    :returns: tuple (passed_tests, failed_tests).
    NOTE(review): counter init/increment lines were lost in extraction and
    are reconstructed here from the surrounding control flow.
    """
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our Lava
    # installation and try to reproduce it.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                if result['result'] in 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    # Known-flaky boot-action tests: ignore the failure.
                    pass
                else:
                    failed_tests += 1

    return (passed_tests, failed_tests)
# Get the benchmark results from the lava bundle
# save them as CSV files locally
def fetch_benchmark_results(server, job):
    """Extract the benchmark CSV attachments of a job and write them to disk."""
    content = get_job_bundle_content(server, job)
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']

    # The result bundle is a large JSON containing the results of every
    # testcase of the LAVA job as well as the files that were attached
    # during the run. Walk it to find the base64 representation of the
    # benchmark results produced during the run.
    for run in content['test_runs']:
        # We only care about the benchmark testcases.
        if 'benchmark-' not in run['test_id']:
            continue
        if 'test_results' not in run:
            continue
        for res in run['test_results']:
            if 'attachments' not in res:
                continue
            for attachment in res['attachments']:
                # We only save the results files.
                if attachment['pathname'] not in testcases:
                    continue
                # Convert the b64 representation of the result file and
                # write it to a file in the current working directory.
                with open(attachment['pathname'], 'wb') as out:
                    out.write(base64.b64decode(attachment['content']))
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    """Print the run-tests test-suite output embedded in the job's bundle.

    :param server: xmlrpc.client.ServerProxy connected to the LAVA master.
    :param job: LAVA job id.
    NOTE(review): the inner print/break lines were lost in extraction and
    are reconstructed from the LAVA_SIGNAL begin/end markers.
    """
    content = get_job_bundle_content(server, job)

    for run in content['test_runs']:
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':
                    # Decode the base64 file and split on newlines to iterate
                    # on the lines.
                    testoutput = str(base64.b64decode(bytes(attachment['content'], encoding='UTF-8')))
                    # str() of the bytes escapes the newlines; restore them
                    # so the output can actually be split per line.
                    testoutput = testoutput.replace('\\n', '\n')

                    # Create a generator to iterate on the lines and keeping
                    # the state of the iterator across the two loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:
                        # Find the header of the test case and start printing
                        # from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                # Print until we reach the end of the
                                # test case.
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    break
                            print('----- TEST SUITE OUTPUT END -----')
                            break
def get_vlttng_cmd(device, lttng_tools_commit, lttng_ust_commit=None):
    """Build the vlttng command that sets up the test virtual environment.

    :param device: a DeviceType member; kvm targets install under /root.
    :param lttng_tools_commit: lttng-tools commit-ish to check out.
    :param lttng_ust_commit: optional lttng-ust commit-ish; when None the
        lttng-ust profiles are omitted.
    :returns: the full vlttng command string, virtualenv path appended.
    NOTE(review): the else-branch and return lines were lost in extraction
    and are reconstructed from the duplicated paths used in main().
    """
    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --override projects.babeltrace.build-env.PYTHON=python3' \
                 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile babeltrace-python' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
                      ' --override projects.lttng-ust.checkout='+lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    if device is DeviceType.kvm:
        vlttng_path = '/root/virtenv'
    else:
        vlttng_path = '/tmp/virtenv'

    vlttng_cmd += ' ' + vlttng_path

    return vlttng_cmd
def main():
    """Render a LAVA job from the jinja template, submit it and wait for it.

    Returns 0 on success (job Complete with no failed test case), -1 on any
    error, for use as the process exit status.
    NOTE(review): the `def main():` header, several assignments and all
    return paths were lost in extraction and are reconstructed here.
    """
    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_bionic_2018-11-29.tar.gz"

    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-id', '--build-id', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    parser.add_argument('-d', '--debug', required=False, action='store_true')
    args = parser.parse_args()

    if args.type not in TestType.values:
        print('argument -t/--type {} unrecognized.'.format(args.type))
        print('Possible values are:')
        for k in TestType.values:
            print('\t {}'.format(k))
        return -1

    try:
        # The Jenkins job injects the LAVA token through the environment.
        lava_api_key = os.environ['LAVA2_JENKINS_TOKEN']
    except Exception as e:
        print('LAVA2_JENKINS_TOKEN not found in the environment variable. Exiting...', e)
        return -1

    # Load the job template from the directory containing this script.
    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
    jinja_env = Environment(loader=jinja_loader, trim_blocks=True,
                            lstrip_blocks=True)
    jinja_template = jinja_env.get_template('template_lava_job.jinja2')
    template_source = jinja_env.loader.get_source(jinja_env, 'template_lava_job.jinja2')
    parsed_content = jinja_env.parse(template_source)
    # Variables the template expects; used to sanity-check the context.
    undef = meta.find_undeclared_variables(parsed_content)

    test_type = TestType.values[args.type]

    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
        device_type = DeviceType.x86
        vlttng_path = '/tmp/virtenv'
    else:
        device_type = DeviceType.kvm
        vlttng_path = '/root/virtenv'

    vlttng_cmd = get_vlttng_cmd(device_type, args.tools_commit, args.ust_commit)

    context = dict()
    context['DeviceType'] = DeviceType
    context['TestType'] = TestType

    context['job_name'] = args.jobname
    context['test_type'] = test_type
    context['random_seed'] = random.randint(0, 1000000)
    context['device_type'] = device_type

    context['vlttng_cmd'] = vlttng_cmd
    context['vlttng_path'] = vlttng_path

    context['kernel_url'] = args.kernel
    context['nfsrootfs_url'] = nfsrootfs
    context['lttng_modules_url'] = args.lmodule
    context['jenkins_build_id'] = args.build_id

    context['kprobe_round_nb'] = 10

    render = jinja_template.render(context)

    print('Current context:')
    pprint.pprint(context, indent=4)
    print('Job to be submitted:')
    print(render)

    # In debug mode, only show what would be submitted.
    if args.debug:
        return 0

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(render)

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master-02.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in 'Submitted' or jobstatus in 'Running':
        if not_running is False and jobstatus in 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    # Do not fetch result for now
    # if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
    #     print_test_output(server, jobid)
    # elif test_type is TestType.baremetal_benchmarks:
    #     fetch_benchmark_results(server, jobid)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus not in 'Complete':
        return -1
    else:
        passed, failed = check_job_all_test_cases_state_count(server, jobid)
        print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0

    return -1


if __name__ == "__main__":
    sys.exit(main())