2 # Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import base64
import json
import os
import random
import sys
import time
import xmlrpc.client

from collections import OrderedDict
from urllib.parse import urljoin
from urllib.request import urlretrieve
# Credentials and endpoints used to talk to the LAVA master and to the
# artifact storage.
# TODO(review): the USERNAME line was lost in the extraction; value restored
# from context - confirm against upstream.
USERNAME = 'lava-jenkins'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
class TestType:
    """Enumeration of the job flavours this script can submit.

    The members are only compared by identity (``test_type is TestType.x``)
    in main(), so the numeric values merely need to be distinct.
    """
    baremetal_benchmarks = 1
    baremetal_tests = 2
    kvm_tests = 3
    kvm_fuzzing_tests = 4
def get_job_bundle_content(server, job):
    """Fetch and decode the results bundle of a LAVA job.

    Args:
        server: xmlrpc.client.ServerProxy connected to the LAVA master.
        job: LAVA job id (int or str).

    Returns:
        The bundle 'content' field decoded from JSON into Python objects.

    Raises:
        xmlrpc.client.Fault: re-raised after logging if the RPC fails.
    """
    try:
        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
        bundle = server.dashboard.get(bundle_sha)
    except xmlrpc.client.Fault as f:
        print('Error while fetching results bundle', f.faultString)
        # The original fell through here and crashed with an
        # UnboundLocalError on 'bundle'; re-raise so the failure is explicit.
        raise
    return json.loads(bundle['content'])
def check_job_all_test_cases_state_count(server, job):
    """Parse the results bundle and count passed/failed test cases.

    Checks that the run-tests testcase of the lttng-kernel-tests passed
    successfully; a small set of known-flaky boot-action cases is ignored.

    Args:
        server: xmlrpc.client.ServerProxy connected to the LAVA master.
        job: LAVA job id.

    Returns:
        Tuple (passed_tests, failed_tests).
    """
    content = get_job_bundle_content(server, job)

    # FIXME: Those tests are part of the boot actions and fail randomly but
    # don't affect the behaviour of the tests. We should update our Lava
    # installation and try to reproduce it.
    tests_known_to_fail = ['mount', 'df', 'ls', 'ip',
                           'wait_for_test_image_prompt']

    passed_tests = 0
    failed_tests = 0
    for run in content['test_runs']:
        for result in run['test_results']:
            if 'test_case_id' in result:
                # The original used "result['result'] in 'pass'", a substring
                # test that also matches '' and 'pas'; exact comparison is
                # the intended semantics.
                if result['result'] == 'pass':
                    passed_tests += 1
                elif result['test_case_id'] in tests_known_to_fail:
                    # Known-flaky case: neither passed nor failed.
                    continue
                else:
                    failed_tests += 1
    return (passed_tests, failed_tests)
def fetch_benchmark_results(build_id):
    """Get the benchmark results from the object store and save them locally.

    Each CSV is downloaded from OBJSTORE_URL/<build_id>/<name> and written
    under its own name in the current directory.

    Args:
        build_id: Jenkins build id, first path component in the object store.
    """
    testcases = ['processed_results_close.csv',
                 'processed_results_ioctl.csv',
                 'processed_results_open_efault.csv',
                 'processed_results_open_enoent.csv',
                 'processed_results_dup_close.csv',
                 'processed_results_raw_syscall_getpid.csv',
                 'processed_results_lttng_test_filter.csv']
    for testcase in testcases:
        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
        urlretrieve(url, testcase)
def print_test_output(server, job):
    """Print the stdout of the run-tests test case from a job's bundle.

    Parses the 'stdout.log' attachment of the lttng-kernel-test run and
    echoes the section between the LAVA start/end markers.

    Args:
        server: xmlrpc.client.ServerProxy connected to the LAVA master.
        job: LAVA job id.
    """
    content = get_job_bundle_content(server, job)

    for run in content['test_runs']:
        if run['test_id'] in 'lttng-kernel-test':
            for attachment in run['attachments']:
                if attachment['pathname'] in 'stdout.log':
                    # Decode the base64 file; str() leaves literal '\n'
                    # escapes in the text, so restore real line breaks.
                    testoutput = str(base64.b64decode(
                        bytes(attachment['content'], encoding='UTF-8')))
                    testoutput = testoutput.replace('\\n', '\n')

                    # A single iterator shared by both loops keeps the scan
                    # position across the outer (find header) and inner
                    # (print body) loops.
                    testoutput_iter = iter(testoutput.split('\n'))
                    for line in testoutput_iter:
                        # Find the header of the test case and start
                        # printing from there.
                        if 'LAVA_SIGNAL_STARTTC run-tests' in line:
                            print('---- TEST SUITE OUTPUT BEGIN ----')
                            for line in testoutput_iter:
                                if 'LAVA_SIGNAL_ENDTC run-tests' not in line:
                                    print(line)
                                else:
                                    # Stop when we reach the end of the
                                    # section.
                                    break
                            print('----- TEST SUITE OUTPUT END -----')
                            break
def create_new_job(name, build_device):
    """Create the skeleton of a LAVA job description.

    Args:
        name: job name shown in the LAVA scheduler.
        build_device: LAVA device type ('x86' or 'kvm').

    Returns:
        OrderedDict with an empty 'actions' list for the caller to fill.
    """
    job = OrderedDict({
        'health_check': False,
        'job_name': name,
        'device_type': build_device,
        'tags': [],
        # TODO(review): timeout value restored after extraction damage -
        # confirm against upstream.
        'timeout': 18000,
        'actions': []
    })
    # The x86 baremetal device exposes a scratch partition on sda1.
    if build_device in 'x86':
        job['tags'].append('dev-sda1')

    return job
def get_boot_cmd():
    """Return the LAVA action that boots the deployed image.

    Note: the ``def`` line of this function was lost in the extraction; it is
    restored here because main() calls get_boot_cmd().
    """
    command = OrderedDict({
        'command': 'boot_image'
    })
    return command
def get_config_cmd(build_device):
    """Build the 'lava_command_run' action that configures the test machine.

    Sets up DNS, installs the build/test dependencies and, on x86, mounts
    the scratch partition on /tmp.

    Args:
        build_device: device-type string; 'x86' gets extra mount commands.

    Returns:
        OrderedDict describing the LAVA 'lava_command_run' action.
    """
    packages = ['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip',
                'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev',
                'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev',
                'libnuma-dev', 'python3-dev', 'swig', 'stress']
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'cat /etc/resolv.conf',
                'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                # TODO(review): the commands below the resolv.conf lines and
                # the timeout were lost in the extraction and restored from
                # context - confirm against upstream.
                'groupadd tracing'
            ],
            'timeout': 18000
        }
    })

    if build_device in 'x86':
        command['parameters']['commands'].extend([
            'mount /dev/sda1 /tmp',
            'rm -rf /tmp/*'])

    command['parameters']['commands'].extend([
        'depmod -a',
        'locale-gen en_US.UTF-8',
        'apt-get update',
        'apt-get upgrade',
        'apt-get install -y {}'.format(' '.join(packages))])

    return command
def get_baremetal_benchmarks_cmd(build_id):
    """Build the 'lava_test_shell' action running the benchmark test defs.

    The seven test definitions all share the same repo/revision and only
    differ by the testdef path, so the repo entries are generated from a
    list instead of being copy-pasted.

    Args:
        build_id: Jenkins build id passed to each test def as
            JENKINS_BUILD_ID.

    Returns:
        OrderedDict describing the LAVA 'lava_test_shell' action.
    """
    testdefs = [
        'lava/system-tests/failing-close.yml',
        'lava/system-tests/failing-ioctl.yml',
        'lava/system-tests/failing-open-efault.yml',
        'lava/system-tests/success-dup-close.yml',
        'lava/system-tests/raw-syscall-getpid.yml',
        'lava/system-tests/failing-open-enoent.yml',
        'lava/system-tests/lttng-test-filter.yml',
    ]
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': testdef,
                    'parameters': {'JENKINS_BUILD_ID': build_id}
                }
                for testdef in testdefs
            ],
            # TODO(review): timeout restored after extraction damage -
            # confirm against upstream.
            'timeout': 18000
        }
    })
    return command
def get_baremetal_tests_cmd(build_id):
    """Build the 'lava_test_shell' action running the perf test def.

    Args:
        build_id: Jenkins build id passed to the test def as
            JENKINS_BUILD_ID.

    Returns:
        OrderedDict describing the LAVA 'lava_test_shell' action.
    """
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/perf-tests.yml',
                    'parameters': {'JENKINS_BUILD_ID': build_id}
                }
            ],
            # TODO(review): timeout restored after extraction damage -
            # confirm against upstream.
            'timeout': 18000
        }
    })
    return command
def get_kvm_tests_cmd(build_id):
    """Build the 'lava_test_shell' action running the KVM kernel test defs.

    Args:
        build_id: Jenkins build id passed to each test def as
            JENKINS_BUILD_ID.

    Returns:
        OrderedDict describing the LAVA 'lava_test_shell' action.
    """
    testdefs = [
        'lava/system-tests/kernel-tests.yml',
        'lava/system-tests/destructive-tests.yml',
    ]
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': testdef,
                    'parameters': {'JENKINS_BUILD_ID': build_id}
                }
                for testdef in testdefs
            ],
            # TODO(review): timeout restored after extraction damage -
            # confirm against upstream.
            'timeout': 18000
        }
    })
    return command
def get_kprobes_generate_data_cmd():
    """Build the action generating the kprobe fuzzing data set.

    A fresh random seed is drawn per job so each submission fuzzes a
    different input, and is passed to the test def as RANDOM_SEED.

    Returns:
        OrderedDict describing the LAVA 'lava_test_shell' action.
    """
    random_seed = random.randint(0, 1000000)
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kprobe-fuzzing-generate-data.yml',
                    'parameters': {'RANDOM_SEED': str(random_seed)}
                }
            ],
            # TODO(review): timeout restored after extraction damage -
            # confirm against upstream.
            'timeout': 18000
        }
    })
    return command
def get_kprobes_test_cmd(round_nb):
    """Build the action running one round of the kprobe fuzzing tests.

    Args:
        round_nb: fuzzing round number, passed to the test def as ROUND_NB.

    Returns:
        OrderedDict describing the LAVA 'lava_test_shell' action.
    """
    command = OrderedDict({
        'command': 'lava_test_shell',
        'parameters': {
            'testdef_repos': [
                {
                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
                    'revision': 'master',
                    'testdef': 'lava/system-tests/kprobe-fuzzing-tests.yml',
                    'parameters': {'ROUND_NB': str(round_nb)}
                }
            ],
            # TODO(review): timeout restored after extraction damage -
            # confirm against upstream.
            'timeout': 18000
        }
    })
    return command
def get_results_cmd(stream_name):
    """Build the 'submit_results' action uploading the bundle to a stream.

    Args:
        stream_name: name of the anonymous LAVA bundle stream.

    Returns:
        OrderedDict describing the LAVA 'submit_results' action.
    """
    command = OrderedDict({
        'command': 'submit_results',
        'parameters': {
            'server': 'http://lava-master.internal.efficios.com/RPC2/'
        }
    })
    command['parameters']['stream'] = '/anonymous/' + stream_name + '/'
    return command
def get_deploy_cmd_kvm(jenkins_job, kernel_path, lttng_modules_path):
    """Build the 'deploy_kernel' action for the KVM device.

    Deploys the kernel over SCP, customizes the rootfs with the lttng-modules
    archive, and records the Jenkins job name in the metadata.

    Args:
        jenkins_job: Jenkins job name, stored as metadata.
        kernel_path: storage path of the kernel image (appended to SCP_PATH).
        lttng_modules_path: storage path of the modules archive.

    Returns:
        OrderedDict describing the LAVA 'deploy_kernel' action.
    """
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            # 'customize' and 'kernel' are placeholders filled in below.
            'customize': {},
            'kernel': None,
            'target_type': 'ubuntu',
            'rootfs': 'file:///var/lib/lava-server/default/media/images/xenial.img.gz',
            'login_prompt': 'kvm02 login:',
            # TODO(review): username field restored after extraction damage -
            # confirm against upstream.
            'username': 'root'
        }
    })

    command['parameters']['customize'][SCP_PATH + lttng_modules_path] = \
        ['rootfs:/', 'archive']
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job

    return command
def get_deploy_cmd_x86(jenkins_job, kernel_path, lttng_modules_path,
                       nb_iter=None):
    """Build the 'deploy_kernel' action for the x86 baremetal device.

    Deploys the kernel and the lttng-modules overlay over SCP on top of an
    NFS rootfs, and records Jenkins metadata.

    Args:
        jenkins_job: Jenkins job name, stored as metadata.
        kernel_path: storage path of the kernel image (appended to SCP_PATH).
        lttng_modules_path: storage path of the modules overlay.
        nb_iter: optional iteration count, stored as metadata when given.

    Returns:
        OrderedDict describing the LAVA 'deploy_kernel' action.
    """
    command = OrderedDict({
        'command': 'deploy_kernel',
        'metadata': {},
        'parameters': {
            # 'overlays' and 'kernel' are placeholders filled in below.
            'overlays': [],
            'kernel': None,
            'nfsrootfs': str(SCP_PATH + '/storage/jenkins-lava/rootfs/rootfs_amd64_trusty_2016-02-23-1134.tar.gz'),
            'target_type': 'ubuntu'
        }
    })

    command['parameters']['overlays'].append(str(SCP_PATH + lttng_modules_path))
    command['parameters']['kernel'] = str(SCP_PATH + kernel_path)
    command['metadata']['jenkins_jobname'] = jenkins_job
    if nb_iter is not None:
        command['metadata']['nb_iterations'] = nb_iter

    return command
def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
    """Build the action that installs vlttng and creates the test virtenv.

    Args:
        build_device: device type; 'kvm' places the virtenv under /root,
            anything else under /tmp.
        lttng_tools_commit: lttng-tools commit to check out via vlttng.
        lttng_ust_commit: optional lttng-ust commit; when given, the ust
            profiles are added to the vlttng command.

    Returns:
        OrderedDict describing the LAVA 'lava_command_run' action.
    """
    command = OrderedDict({
        'command': 'lava_command_run',
        'parameters': {
            'commands': [
                'pip3 install --upgrade pip',
                # TODO(review): 'hash -r' restored after extraction damage -
                # confirm against upstream.
                'hash -r',
                'pip3 install vlttng',
            ],
            'timeout': 18000
        }
    })

    vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
                 ' --override projects.babeltrace.build-env.PYTHON=python3' \
                 ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
                 ' --profile babeltrace-stable-1.4' \
                 ' --profile babeltrace-python' \
                 ' --profile lttng-tools-master' \
                 ' --override projects.lttng-tools.checkout=' + lttng_tools_commit + \
                 ' --profile lttng-tools-no-man-pages'

    if lttng_ust_commit is not None:
        vlttng_cmd += ' --profile lttng-ust-master ' \
                      ' --override projects.lttng-ust.checkout=' + lttng_ust_commit + \
                      ' --profile lttng-ust-no-man-pages'

    # The KVM image runs everything as root; baremetal uses the scratch /tmp.
    if build_device in 'kvm':
        virtenv_path = '/root/virtenv'
    else:
        virtenv_path = '/tmp/virtenv'

    # The last positional argument of vlttng is the virtenv destination.
    vlttng_cmd += ' ' + virtenv_path

    command['parameters']['commands'].append(vlttng_cmd)
    command['parameters']['commands'].append('ln -s ' + virtenv_path + ' /root/lttngvenv')
    command['parameters']['commands'].append('sync')

    return command
def main():
    """Entry point: build, submit and monitor a LAVA job from CLI arguments.

    Note: the ``def main():`` line, several ``return`` statements and the
    polling sleep were lost in the extraction; they are restored here from
    context (TODO(review): confirm against upstream). Returns 0 on full
    success, -1 otherwise.
    """
    test_type = None
    parser = argparse.ArgumentParser(description='Launch baremetal test using Lava')
    parser.add_argument('-t', '--type', required=True)
    parser.add_argument('-j', '--jobname', required=True)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-id', '--build-id', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()

    # NOTE: "x in 'literal'" is a substring test, not equality; kept from
    # the original since the exact flag strings always match.
    if args.type in 'baremetal-benchmarks':
        test_type = TestType.baremetal_benchmarks
    elif args.type in 'baremetal-tests':
        test_type = TestType.baremetal_tests
    elif args.type in 'kvm-tests':
        test_type = TestType.kvm_tests
    elif args.type in 'kvm-fuzzing-tests':
        test_type = TestType.kvm_fuzzing_tests
    else:
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1

    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except Exception as e:
        print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e)
        return -1

    # Deploy step: pick the device type and kernel deployment flavour.
    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
    elif test_type is TestType.baremetal_tests:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.lmodule))
    elif test_type is TestType.kvm_tests or test_type is TestType.kvm_fuzzing_tests:
        j = create_new_job(args.jobname, build_device='kvm')
        j['actions'].append(get_deploy_cmd_kvm(args.jobname, args.kernel, args.lmodule))

    j['actions'].append(get_boot_cmd())

    # Test step: configure the machine, set up the env and queue the suites.
    if test_type is TestType.baremetal_benchmarks:
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
        j['actions'].append(get_baremetal_benchmarks_cmd(args.build_id))
        j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
    elif test_type is TestType.baremetal_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('x86'))
        j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
        j['actions'].append(get_baremetal_tests_cmd(args.build_id))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
        j['actions'].append(get_kvm_tests_cmd(args.build_id))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    elif test_type is TestType.kvm_fuzzing_tests:
        if args.ust_commit is None:
            print('Tests runs need -uc/--ust-commit options. Exiting...')
            return -1
        j['actions'].append(get_config_cmd('kvm'))
        j['actions'].append(get_kprobes_generate_data_cmd())
        # TODO(review): round count restored after extraction damage -
        # confirm against upstream.
        for i in range(10):
            j['actions'].append(get_kprobes_test_cmd(round_nb=i))
        j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
    else:
        assert False, 'Unknown test type'

    server = xmlrpc.client.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))

    jobid = server.scheduler.submit_job(json.dumps(j))

    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))

    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    not_running = False
    while jobstatus in 'Submitted' or jobstatus in 'Running':
        if not_running is False and jobstatus in 'Running':
            print('Job started running')
            not_running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
    elif test_type is TestType.baremetal_benchmarks:
        fetch_benchmark_results(args.build_id)

    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus not in 'Complete':
        return -1

    passed, failed = check_job_all_test_cases_state_count(server, jobid)
    print('With {} passed and {} failed Lava test cases.'.format(passed, failed))

    if failed == 0:
        return 0
    return -1


if __name__ == "__main__":
    sys.exit(main())