X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=scripts%2Flttng-baremetal-tests%2Flava-submit.py;h=fc8cd3f267d9b5ce4f36a57e33f2229224ed8cbe;hb=bb368cdeb0b7bddfcccebbd264b82a650b1abb3c;hp=e4fbc7feed262f30cfd2257add51f9f650e00756;hpb=73c1d4bc4fa0f60708012e8620d5f279c4787d3f;p=lttng-ci.git

diff --git a/scripts/lttng-baremetal-tests/lava-submit.py b/scripts/lttng-baremetal-tests/lava-submit.py
index e4fbc7f..fc8cd3f 100644
--- a/scripts/lttng-baremetal-tests/lava-submit.py
+++ b/scripts/lttng-baremetal-tests/lava-submit.py
@@ -47,6 +47,12 @@ def get_job_bundle_content(server, job):
 def check_job_all_test_cases_state_count(server, job):
     content = get_job_bundle_content(server, job)
 
+    # FIXME:Those tests are part of the boot actions and fail randomly but
+    # doesn't affect the behaviour of the tests. We should update our Lava
+    # installation and try to reproduce it. This error was encountered on
+    # Ubuntu 16.04.
+    tests_known_to_fail=['mount', 'df', 'ls', 'ip', 'wait_for_test_image_prompt']
+
     passed_tests=0
     failed_tests=0
     for run in content['test_runs']:
@@ -54,13 +60,7 @@ def check_job_all_test_cases_state_count(server, job):
             if 'test_case_id' in result :
                 if result['result'] in 'pass':
                     passed_tests+=1
-                elif result['test_case_id'] in 'wait_for_test_image_prompt':
-                    # FIXME:This test is part of the boot action and fails
-                    # randomly but doesn't affect the behaviour of the tests.
-                    # No reply on the Lava IRC channel yet. We should update
-                    # our Lava installation and try to reproduce it. This error
-                    # was encountered ont the KVM trusty image only. Not seen
-                    # on Xenial at this point.
+                elif result['test_case_id'] in tests_known_to_fail:
                     pass
                 else:
                     failed_tests+=1
@@ -70,7 +70,13 @@ def check_job_all_test_cases_state_count(server, job):
 # save them as CSV files localy
 def fetch_benchmark_results(server, job):
     content = get_job_bundle_content(server, job)
-    testcases = ['processed_results_close.csv', 'processed_results_open_enoent.csv', 'processed_results_open_efault.csv']
+    testcases = ['processed_results_close.csv',
+            'processed_results_ioctl.csv',
+            'processed_results_open_efault.csv',
+            'processed_results_open_enoent.csv',
+            'processed_results_dup_close.csv',
+            'processed_results_raw_syscall_getpid.csv',
+            'processed_results_lttng_test_filter.csv']
 
     # The result bundle is a large JSON containing the results of every testcase
     # of the LAVA job as well as the files that were attached during the run.
@@ -78,7 +84,7 @@ def fetch_benchmark_results(server, job):
     # benchmark results produced during the run.
     for run in content['test_runs']:
         # We only care of the benchmark testcases
-        if 'benchmark-syscall-' in run['test_id']:
+        if 'benchmark-' in run['test_id']:
             if 'test_results' in run:
                 for res in run['test_results']:
                     if 'attachments' in res:
@@ -150,7 +156,8 @@ def get_boot_cmd():
 def get_config_cmd(build_device):
     packages=['bsdtar', 'psmisc', 'wget', 'python3', 'python3-pip', \
             'libglib2.0-dev', 'libffi-dev', 'elfutils', 'libdw-dev', \
-            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev']
+            'libelf-dev', 'libmount-dev', 'libxml2', 'libpfm4-dev', \
+            'libnuma-dev', 'python3-dev', 'swig', 'stress']
     command = OrderedDict({
         'command': 'lava_command_run',
         'parameters': {
@@ -158,7 +165,8 @@ def get_config_cmd(build_device):
                 'cat /etc/resolv.conf',
                 'echo nameserver 172.18.0.12 > /etc/resolv.conf',
                 'groupadd tracing'
-            ]
+            ],
+            'timeout':300
         }
     })
     if build_device in 'x86':
@@ -185,15 +193,35 @@ def get_baremetal_benchmarks_cmd():
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-close.yml'
                 },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/failing-ioctl.yml'
+                },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-efault.yml'
                 },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/success-dup-close.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/raw-syscall-getpid.yml'
+                },
                 {
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/failing-open-enoent.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/lttng-test-filter.yml'
                 }
             ],
             'timeout': 18000
@@ -231,6 +259,11 @@ def get_kvm_tests_cmd():
                     'git-repo': 'https://github.com/lttng/lttng-ci.git',
                     'revision': 'master',
                     'testdef': 'lava/baremetal-tests/destructive-tests.yml'
+                },
+                {
+                    'git-repo': 'https://github.com/lttng/lttng-ci.git',
+                    'revision': 'master',
+                    'testdef': 'lava/baremetal-tests/kprobe-fuzzing-tests.yml'
                 }
             ],
             'timeout': 18000
@@ -306,7 +339,10 @@ def get_env_setup_cmd(build_device, lttng_tools_commit, lttng_ust_commit=None):
     })
 
     vlttng_cmd = 'vlttng --jobs=$(nproc) --profile urcu-master' \
-            ' --profile babeltrace-stable-1.4 ' \
+            ' --override projects.babeltrace.build-env.PYTHON=python3' \
+            ' --override projects.babeltrace.build-env.PYTHON_CONFIG=python3-config' \
+            ' --profile babeltrace-stable-1.4' \
+            ' --profile babeltrace-python' \
             ' --profile lttng-tools-master' \
             ' --override projects.lttng-tools.checkout='+lttng_tools_commit + \
             ' --profile lttng-tools-no-man-pages'
@@ -354,9 +390,9 @@ def main():
 
     lava_api_key = None
     try:
-        lava_api_key = os.environ['LAVA_FRDESO_TOKEN']
+        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
     except Exception as e:
-        print('LAVA_FRDESO_TOKEN not found in the environment variable. Exiting...', e )
+        print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e )
         return -1
 
     if test_type is TestType.baremetal_benchmarks:
@@ -400,6 +436,7 @@ def main():
 
     jobid = server.scheduler.submit_job(json.dumps(j))
     print('Lava jobid:{}'.format(jobid))
+    print('Lava job URL: http://lava-master.internal.efficios.com/scheduler/job/{}/log_file'.format(jobid))
 
     #Check the status of the job every 30 seconds
     jobstatus = server.scheduler.job_status(jobid)['job_status']
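
For context, the sketch below illustrates the submit-and-poll flow that the last two hunks touch: the script submits a JSON job definition over LAVA's XML-RPC scheduler API, prints the job id and its log URL, then polls the status every 30 seconds. This is a minimal sketch, not part of the patch: the 'lava-jenkins' username and the status strings ('Submitted', 'Running', 'Complete') are assumptions typical of LAVA v1 deployments, while the RPC calls (scheduler.submit_job, scheduler.job_status), the LAVA_JENKINS_TOKEN variable, the hostname, and the 30-second interval come from the script itself.

#!/usr/bin/env python3
# Minimal sketch (not part of the patch above) of lava-submit.py's
# submit-and-poll flow. The 'lava-jenkins' username and the job status
# strings are assumptions; the RPC endpoints and polling interval mirror
# the script shown in the diff.

import json
import os
import time
import xmlrpc.client

def submit_and_wait(job, hostname='lava-master.internal.efficios.com'):
    try:
        lava_api_key = os.environ['LAVA_JENKINS_TOKEN']
    except KeyError as e:
        print('LAVA_JENKINS_TOKEN not found in the environment variable. Exiting...', e)
        return -1

    # LAVA authenticates XML-RPC calls via user:token in the endpoint URL.
    server = xmlrpc.client.ServerProxy(
        'http://{}:{}@{}/RPC2'.format('lava-jenkins', lava_api_key, hostname))

    jobid = server.scheduler.submit_job(json.dumps(job))
    print('Lava jobid:{}'.format(jobid))
    print('Lava job URL: http://{}/scheduler/job/{}/log_file'.format(hostname, jobid))

    # Check the status of the job every 30 seconds until it settles.
    jobstatus = server.scheduler.job_status(jobid)['job_status']
    while jobstatus in ('Submitted', 'Running'):
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']

    return 0 if jobstatus == 'Complete' else -1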