    kvm_tests = 3
def get_job_bundle_content(server, job):
-    bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
-    bundle = server.dashboard.get(bundle_sha)
+    try:
+        bundle_sha = server.scheduler.job_status(str(job))['bundle_sha1']
+        bundle = server.dashboard.get(bundle_sha)
+    except xmlrpclib.Fault as f:
+        print('Error while fetching results bundle: {}'.format(f))
+        # Bail out instead of falling through to an unbound `bundle`
+        return None
    return json.loads(bundle['content'])
                    failed_tests += 1
    return (passed_tests, failed_tests)
+# Get the benchmark results from the LAVA bundle and
+# save them as CSV files locally.
+def fetch_benchmark_results(server, job):
+    content = get_job_bundle_content(server, job)
+    testcases = ['processed_results_close.csv',
+                 'processed_results_open_enoent.csv',
+                 'processed_results_open_efault.csv']
+
+    # The result bundle is a large JSON document containing the results of
+    # every test case of the LAVA job as well as the files that were attached
+    # during the run. We need to iterate over this JSON to get the base64
+    # representation of the benchmark results produced during the run.
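+    # Rough shape of the bundle, inferred from the fields accessed below
+    # (not the full LAVA bundle schema):
+    #   {"test_runs": [{"test_id": "benchmark-syscall-...",
+    #                   "test_results": [{"attachments": [
+    #                       {"pathname": "...", "content": "<base64>"}]}]}]}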
+    for run in content['test_runs']:
+        # We only care about the benchmark test cases
+        if 'benchmark-syscall-' in run['test_id']:
+            if 'test_results' in run:
+                for res in run['test_results']:
+                    if 'attachments' in res:
+                        for a in res['attachments']:
+                            # We only save the result files
+                            if a['pathname'] in testcases:
+                                with open(a['pathname'], 'w') as f:
+                                    # Decode the base64 representation of the
+                                    # result file and write it to a file in
+                                    # the current working directory
+                                    f.write(base64.b64decode(a['content']))
+
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
    content = get_job_bundle_content(server, job)
    parser.add_argument('-k', '--kernel', required=True)
    parser.add_argument('-km', '--kmodule', required=True)
    parser.add_argument('-lm', '--lmodule', required=True)
-    parser.add_argument('-l', '--lava-key', required=True)
    parser.add_argument('-tc', '--tools-commit', required=True)
    parser.add_argument('-uc', '--ust-commit', required=False)
    args = parser.parse_args()
        print('argument -t/--type {} unrecognized. Exiting...'.format(args.type))
        return -1
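+    # The LAVA API token is now read from the environment instead of the
+    # command line, presumably so it does not end up in shell history or
+    # `ps` output. Assumed invocation (script name hypothetical):
+    #   LAVA_FRDESO_TOKEN=<api-token> python lava-submit.py -t <type> ...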
+    lava_api_key = None
+    try:
+        lava_api_key = os.environ['LAVA_FRDESO_TOKEN']
+    except KeyError:
+        print('LAVA_FRDESO_TOKEN not found in the environment. Exiting...')
+        return -1
+
    if test_type is TestType.baremetal_benchmarks:
        j = create_new_job(args.jobname, build_device='x86')
        j['actions'].append(get_deploy_cmd_x86(args.jobname, args.kernel, args.kmodule, args.lmodule))
    else:
        assert False, 'Unknown test type'
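+    # LAVA exposes its XML-RPC API at /RPC2; the username and API token are
+    # embedded in the URL as HTTP basic-auth credentials.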
-    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, args.lava_key, HOSTNAME))
+    server = xmlrpclib.ServerProxy('http://%s:%s@%s/RPC2' % (USERNAME, lava_api_key, HOSTNAME))
    jobid = server.scheduler.submit_job(json.dumps(j))
    # Check the status of the job every 30 seconds
    jobstatus = server.scheduler.job_status(jobid)['job_status']
+    running = False
    while jobstatus in 'Submitted' or jobstatus in 'Running':
+        if not running and jobstatus in 'Running':
+            print('Job started running')
+            running = True
        time.sleep(30)
        jobstatus = server.scheduler.job_status(jobid)['job_status']
    passed, failed = check_job_all_test_cases_state_count(server, jobid)
-    if test_type is TestType.kvm_tests:
+    if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
        print_test_output(server, jobid)
+    elif test_type is TestType.baremetal_benchmarks:
+        fetch_benchmark_results(server, jobid)
    print('Job ended with {} status.'.format(jobstatus))
    if jobstatus not in 'Complete':