diff --git a/scripts/system-tests/system-trigger.groovy b/scripts/system-tests/system-trigger.groovy
index 394227c..151e9d5 100644
--- a/scripts/system-tests/system-trigger.groovy
+++ b/scripts/system-tests/system-trigger.groovy
@@ -162,52 +162,37 @@ class VanillaKVersion implements Comparable {
     }
 }
-class RunConfiguration {
-    def linuxBranch
-    def linuxTagId
-    def lttngBranch
-    def lttngModulesCommitId
-    def lttngToolsCommitId
-    def lttngUstCommitId
-    RunConfiguration(linuxBranch, linuxTagId, lttngBranch, lttngToolsCommitId,
-                     lttngModulesCommitId, lttngUstCommitId) {
-        this.linuxBranch = linuxBranch
-        this.linuxTagId = linuxTagId
-        this.lttngBranch = lttngBranch
-        this.lttngModulesCommitId = lttngModulesCommitId
-        this.lttngToolsCommitId = lttngToolsCommitId
-        this.lttngUstCommitId = lttngUstCommitId
-    }
-
-    String toString() {
-        return "${this.linuxBranch}:{${this.linuxTagId}}, ${this.lttngBranch}:{${this.lttngModulesCommitId}, ${this.lttngToolsCommitId}, ${this.lttngUstCommitId}}"
+// Save the hashmap containing all the jobs and their status to disk. We can do
+// that because this job is configured to always run on the master node on
+// Jenkins.
+def SaveCurrentJobsToWorkspace = { currentJobs, ondiskpath ->
+    try {
+        File myFile = new File(ondiskpath);
+        myFile.createNewFile();
+        def out = new ObjectOutputStream(new FileOutputStream(ondiskpath))
+        out.writeObject(currentJobs)
+        out.close()
+    } catch (e) {
+        println("Failed to save previous Git object IDs to disk." + e);
     }
 }
 
-def LoadPreviousIdsFromWorkspace = { ondiskpath ->
-    def previousIds = []
+// Load the hashmap containing all the jobs and their last status from disk.
+// It's possible because this job is configured to always run on the master
+// node on Jenkins.
+def LoadPreviousJobsFromWorkspace = { ondiskpath ->
+    def previousJobs = [:]
     try {
         File myFile = new File(ondiskpath);
         def input = new ObjectInputStream(new FileInputStream(ondiskpath))
-        previousIds = input.readObject()
+        previousJobs = input.readObject()
         input.close()
-    } catch (all) {
-        println("Failed to load previous ids from disk.")
+    } catch (e) {
+        println("Failed to load previous runs from disk." + e);
     }
-    return previousIds
+    return previousJobs
 }
 
-def saveCurrentIdsToWorkspace = { currentIds, ondiskpath ->
-    try {
-        File myFile = new File(ondiskpath);
-        myFile.createNewFile();
-        def out = new ObjectOutputStream(new FileOutputStream(ondiskpath))
-        out.writeObject(currentIds)
-        out.close()
-    } catch (all) {
-        println("Failed to save previous ids from disk.")
-    }
-}
 
 def GetHeadCommits = { remoteRepo, branchesOfInterest ->
     def remoteHeads = [:]
@@ -278,43 +263,50 @@ def GetLastTagIds = { remoteRepo, branchesOfInterest ->
     return remoteLastTagCommit
 }
 
-def CraftJobName = { jobType, runConfig ->
-    return "${jobType}_k${runConfig.linuxBranch}_l${runConfig.lttngBranch}"
+def CraftJobName = { jobType, linuxBranch, lttngBranch ->
+    return "${jobType}_k${linuxBranch}_l${lttngBranch}"
 }
 
-def LaunchJob = { jobName, runConfig ->
+def LaunchJob = { jobName, jobInfo ->
     def job = Hudson.instance.getJob(jobName)
     def params = []
     for (paramdef in job.getProperty(ParametersDefinitionProperty.class).getParameterDefinitions()) {
-        params += paramdef.getDefaultParameterValue();
+        // If there is a default value for this parameter, use it. Don't use empty
+        // default value parameters.
+        if (paramdef.getDefaultValue()) {
+            params += paramdef.getDefaultParameterValue();
+        }
     }
 
-    params.add(new StringParameterValue('tools_commit_id', runConfig.lttngToolsCommitId))
-    params.add(new StringParameterValue('modules_commit_id', runConfig.lttngModulesCommitId))
-    params.add(new StringParameterValue('ust_commit_id', runConfig.lttngUstCommitId))
-    params.add(new StringParameterValue('kernel_tag_id', runConfig.linuxTagId))
-    job.scheduleBuild2(0, new Cause.UpstreamCause(build), new ParametersAction(params))
-    println "Launching job: ${HyperlinkNote.encodeTo('/' + job.url, job.fullDisplayName)}"
+    params.add(new StringParameterValue('LTTNG_TOOLS_COMMIT_ID', jobInfo['config']['toolsCommit']))
+    params.add(new StringParameterValue('LTTNG_MODULES_COMMIT_ID', jobInfo['config']['modulesCommit']))
+    params.add(new StringParameterValue('LTTNG_UST_COMMIT_ID', jobInfo['config']['ustCommit']))
+    params.add(new StringParameterValue('KERNEL_TAG_ID', jobInfo['config']['linuxTagID']))
+    def currBuild = job.scheduleBuild2(0, new Cause.UpstreamCause(build), new ParametersAction(params))
+
+    if (currBuild != null ) {
+        println("Launching job: ${HyperlinkNote.encodeTo('/' + job.url, job.fullDisplayName)}");
+    } else {
+        println("Job ${jobName} not found or deactivated.");
+    }
+
+    return currBuild
 }
 
-def jobTypes = ['baremetal_tests', 'vm_tests', 'baremetal_benchmarks']
 final String toolsRepo = "https://github.com/lttng/lttng-tools.git"
 final String modulesRepo = "https://github.com/lttng/lttng-modules.git"
 final String ustRepo = "https://github.com/lttng/lttng-ust.git"
 final String linuxRepo = "git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git"
 
-final String toolsOnDiskPath = build.getEnvironment(listener).get('WORKSPACE') + "/on-disk-tools-ref"
-final String modulesOnDiskPath = build.getEnvironment(listener).get('WORKSPACE') + "/on-disk-modules-ref"
-final String ustOnDiskPath = build.getEnvironment(listener).get('WORKSPACE') + "/on-disk-ust-ref"
-final String linuxOnDiskPath = build.getEnvironment(listener).get('WORKSPACE') + "/on-disk-linux-ref"
+final String pastJobsPath = build.getEnvironment(listener).get('WORKSPACE') + "/pastjobs";
 
 def recentLttngBranchesOfInterest = ['master', 'stable-2.10', 'stable-2.9']
 def recentLinuxBranchesOfInterest = ['master', 'linux-4.9.y', 'linux-4.4.y']
 def legacyLttngBranchesOfInterest = ['stable-2.7']
-def legacyLinuxBranchesOfInterest = ['linux-3.18.y', 'linux-4.4.y']
+def legacyLinuxBranchesOfInterest = ['linux-3.18.y']
 
-// Generate configurations of interest
+// Generate configurations of interest.
 def configurationOfInterest = [] as Set
 
 recentLttngBranchesOfInterest.each { lttngBranch ->
@@ -332,131 +324,196 @@ legacyLttngBranchesOfInterest.each { lttngBranch ->
 def lttngBranchesOfInterest = recentLttngBranchesOfInterest + legacyLttngBranchesOfInterest
 def linuxBranchesOfInterest = recentLinuxBranchesOfInterest + legacyLinuxBranchesOfInterest
 
-// For Linux branches, we look for new non-RC tags
+// For LTTng branches, we look for new commits.
 def toolsHeadCommits = GetHeadCommits(toolsRepo, lttngBranchesOfInterest)
 def modulesHeadCommits = GetHeadCommits(modulesRepo, lttngBranchesOfInterest)
 def ustHeadCommits = GetHeadCommits(ustRepo, lttngBranchesOfInterest)
 
-// For LTTng branches, we look for new commits
+// For Linux branches, we look for new non-RC tags.
 def linuxLastTagIds = GetLastTagIds(linuxRepo, linuxBranchesOfInterest)
 
-// Load previously build Linux tag ids
-def oldLinuxTags = LoadPreviousIdsFromWorkspace(linuxOnDiskPath) as Set
-
-// Load previously built LTTng commit ids
-def oldToolsHeadCommits = LoadPreviousIdsFromWorkspace(toolsOnDiskPath) as Set
-def oldModulesHeadCommits = LoadPreviousIdsFromWorkspace(modulesOnDiskPath) as Set
-def oldUstHeadCommits = LoadPreviousIdsFromWorkspace(ustOnDiskPath) as Set
-
-def newOldLinuxTags = oldLinuxTags
-def newOldToolsHeadCommits = oldToolsHeadCommits
-def newOldModulesHeadCommits = oldModulesHeadCommits
-def newOldUstHeadCommits = oldUstHeadCommits
-
-def canaryRunConfigs = [] as Set
-canaryRunConfigs.add(
-    ['v4.4.9', '1a1a512b983108015ced1e7a7c7775cfeec42d8c', 'v2.8.1','d11e0db', '7fd9215', '514a87f'] as RunConfiguration)
-
-def runConfigs = [] as Set
-
-// For each top of branch kernel tags that were not seen before, schedule one
-// job for each lttng/linux tracked configurations
-linuxLastTagIds.each { linuxTag ->
-    if (!oldLinuxTags.contains(linuxTag.value)) {
-        lttngBranchesOfInterest.each { lttngBranch ->
-            if (configurationOfInterest.contains([lttngBranch, linuxTag.key])) {
-                runConfigs.add([linuxTag.key, linuxTag.value,
-                    lttngBranch, toolsHeadCommits[lttngBranch],
-                    modulesHeadCommits[lttngBranch], ustHeadCommits[lttngBranch]]
-                    as RunConfiguration)
-
-                newOldLinuxTags.add(linuxTag.value)
-            }
-        }
+def CraftConfig = { linuxBr, lttngBr ->
+    def job = [:];
+    job['config'] = [:];
+    job['config']['linuxBranch'] = linuxBr;
+    job['config']['lttngBranch'] = lttngBr;
+    job['config']['linuxTagID'] = linuxLastTagIds[linuxBr];
+    job['config']['toolsCommit'] = toolsHeadCommits[lttngBr];
+    job['config']['modulesCommit'] = modulesHeadCommits[lttngBr];
+    job['config']['ustCommit'] = ustHeadCommits[lttngBr];
+    job['status'] = 'NOT_SET';
+    job['build'] = null;
+    return job;
+}
+
+// Check what type of jobs should be triggered.
+triggerJobName = build.project.getFullDisplayName();
+if (triggerJobName.contains("vm_tests")) {
+    jobType = 'vm_tests';
+} else if (triggerJobName.contains("baremetal_tests")) {
+    jobType = 'baremetal_tests';
+} else if (triggerJobName.contains("baremetal_benchmarks")) {
+    jobType = 'baremetal_benchmarks';
+}
+
+// Hashmap containing all the jobs, their configuration (commit id, etc.) and
+// their status (SUCCEEDED, FAILED, etc.). This Hashmap is made of basic strings
+// rather than objects and enums because strings are easily serializable.
+def currentJobs = [:];
+
+// Get an up-to-date view of all the branches of interest.
+configurationOfInterest.each { lttngBr, linuxBr ->
+    def jobName = CraftJobName(jobType, linuxBr, lttngBr);
+    currentJobs[jobName] = CraftConfig(linuxBr, lttngBr);
+
+    // Add a fuzzing job in vm_tests on the master branches of lttng and linux.
+    if (jobType == 'vm_tests' && lttngBr == 'master' && linuxBr == 'master') {
+        def vmFuzzingJobName = CraftJobName(jobType + '_fuzzing', linuxBr, lttngBr);
+        currentJobs[vmFuzzingJobName] = CraftConfig(linuxBr, lttngBr);
     }
 }
 
-// For each top of branch commits that were not seen before, schedule one job
-// for each lttng/linux tracked configurations
-toolsHeadCommits.each { toolsHead ->
-    if (!oldToolsHeadCommits.contains(toolsHead.value)) {
-        linuxLastTagIds.each { linuxTag ->
-            def lttngBranch = toolsHead.key
-            if (configurationOfInterest.contains([lttngBranch, linuxTag.key])) {
-                runConfigs.add([linuxTag.key, linuxTag.value,
-                    lttngBranch, toolsHeadCommits[lttngBranch],
-                    modulesHeadCommits[lttngBranch], ustHeadCommits[lttngBranch]]
-                    as RunConfiguration)
-
-                newOldToolsHeadCommits.add(toolsHead.value)
+// Add the canary job.
+def jobNameCanary = jobType + "_canary";
+currentJobs[jobNameCanary] = [:];
+currentJobs[jobNameCanary]['config'] = [:];
+currentJobs[jobNameCanary]['config']['linuxBranch'] = 'v4.4.9';
+currentJobs[jobNameCanary]['config']['lttngBranch'] = 'v2.8.1';
+currentJobs[jobNameCanary]['config']['linuxTagID'] = '1a1a512b983108015ced1e7a7c7775cfeec42d8c';
+currentJobs[jobNameCanary]['config']['toolsCommit'] = 'd11e0db'
+currentJobs[jobNameCanary]['config']['modulesCommit'] = '7fd9215'
+currentJobs[jobNameCanary]['config']['ustCommit'] = '514a87f'
+currentJobs[jobNameCanary]['status'] = 'NOT_SET';
+currentJobs[jobNameCanary]['build'] = null;
+
+def pastJobs = LoadPreviousJobsFromWorkspace(pastJobsPath);
+
+def failedRuns = []
+def abortedRuns = []
+def isFailed = false
+def isAborted = false
+def ongoingJobs = 0;
+
+currentJobs.each { jobName, jobInfo ->
+    // If the job ran in the past, we check if the IDs changed since.
+    // Fetch past results only if the job is not of type canary or fuzzing.
+    if (!jobName.contains('_canary') && !jobName.contains('_fuzzing') &&
+        pastJobs.containsKey(jobName)) {
+        pastJob = pastJobs[jobName];
+
+        // If the code has not changed, report the previous status.
+        if (pastJob['config'] == jobInfo['config']) {
+            // If the config has not changed, we keep it.
+            // If it failed, we don't launch a new job and keep it failed.
+            jobInfo['status'] = pastJob['status'];
+            if (pastJob['status'] == 'FAILED') {
+                println("${jobName} has not changed since the last failed run. Don't run it again.");
+                // Mark the umbrella job as failed, but still run the jobs that changed
+                // since the last run.
+                isFailed = true;
+                return;
+            } else if (pastJob['status'] == 'ABORTED') {
+                println("${jobName} has not changed since the last aborted run. Run it again.");
+            } else if (pastJob['status'] == 'SUCCEEDED') {
+                println("${jobName} has not changed since the last successful run. Don't run it again.");
Don't run it again."); + return; } } } + + jobInfo['status'] = 'PENDING'; + jobInfo['build'] = LaunchJob(jobName, jobInfo); + ongoingJobs += 1; } -// For each top of branch commits that were not seen before, schedule one job -// for each lttng/linux tracked configurations -modulesHeadCommits.each { modulesHead -> - if (!oldModulesHeadCommits.contains(modulesHead.value)) { - linuxLastTagIds.each { linuxTag -> - def lttngBranch = modulesHead.key - if (configurationOfInterest.contains([lttngBranch, linuxTag.key])) { - runConfigs.add([linuxTag.key, linuxTag.value, - lttngBranch, toolsHeadCommits[lttngBranch], - modulesHeadCommits[lttngBranch], ustHeadCommits[lttngBranch]] - as RunConfiguration) - - newOldModulesHeadCommits.add(modulesHead.value) +while (ongoingJobs > 0) { + currentJobs.each { jobName, jobInfo -> + + if (jobInfo['status'] != 'PENDING') { + return; + } + + jobBuild = jobInfo['build'] + + // The isCancelled() method checks if the run was cancelled before + // execution. We consider such run as being aborted. + if (jobBuild.isCancelled()) { + println("${jobName} was cancelled before launch.") + isAborted = true; + abortedRuns.add(jobName); + ongoingJobs -= 1; + jobInfo['status'] = 'ABORTED' + // Invalidate the build field, as it's not serializable and we don't need + // it anymore. + jobInfo['build'] = null; + } else if (jobBuild.isDone()) { + + jobExitStatus = jobBuild.get(); + + // Invalidate the build field, as it's not serializable and we don't need + // it anymore. + jobInfo['build'] = null; + println("${jobExitStatus.fullDisplayName} completed with status ${jobExitStatus.result}."); + + // If the job didn't succeed, add its name to the right list so it can + // be printed at the end of the execution. + ongoingJobs -= 1; + switch (jobExitStatus.result) { + case Result.ABORTED: + isAborted = true; + abortedRuns.add(jobName); + jobInfo['status'] = 'ABORTED' + break; + case Result.FAILURE: + isFailed = true; + failedRuns.add(jobName); + jobInfo['status'] = 'FAILED' + break; + case Result.SUCCESS: + jobInfo['status'] = 'SUCCEEDED' + break; + default: + break; } } } -} -// For each top of branch commits that were not seen before, schedule one job -// for each lttng/linux tracked configurations -ustHeadCommits.each { ustHead -> - if (!oldUstHeadCommits.contains(ustHead.value)) { - linuxLastTagIds.each { linuxTag -> - def lttngBranch = ustHead.key - if (configurationOfInterest.contains([lttngBranch, linuxTag.key])) { - runConfigs.add([linuxTag.key, linuxTag.value, - lttngBranch, toolsHeadCommits[lttngBranch], - modulesHeadCommits[lttngBranch], ustHeadCommits[lttngBranch]] - as RunConfiguration) - - newOldUstHeadCommits.add(ustHead.value) - } + // Sleep before the next iteration. + try { + Thread.sleep(30000) + } catch(e) { + if (e in InterruptedException) { + build.setResult(hudson.model.Result.ABORTED) + throw new InterruptedException() + } else { + throw(e) } } } -// Save the tag and commit IDs scheduled in the past and during this run to the workspace -saveCurrentIdsToWorkspace(newOldLinuxTags, linuxOnDiskPath) -saveCurrentIdsToWorkspace(newOldToolsHeadCommits, toolsOnDiskPath) -saveCurrentIdsToWorkspace(newOldModulesHeadCommits, modulesOnDiskPath) -saveCurrentIdsToWorkspace(newOldUstHeadCommits, ustOnDiskPath) - -// Launch jobs -println("Schedule canary jobs once a day") -canaryRunConfigs.each { config -> - jobTypes.each { type -> - LaunchJob(type + '_canary', config) +//All jobs are done running. Save their exit status to disk. 
+SaveCurrentJobsToWorkspace(currentJobs, pastJobsPath);
+
+// Get log of failed runs.
+if (failedRuns.size() > 0) {
+    println("Failed job(s):");
+    for (failedRun in failedRuns) {
+        println("\t" + failedRun)
     }
 }
 
-if (runConfigs.size() > 0) {
-    println("Schedule jobs because of code changes.")
-    runConfigs.each { config ->
-        jobTypes.each { type ->
-            LaunchJob(CraftJobName(type, config), config);
-        }
-
-        // Jobs to run only on master branchs of both linux and lttng
-        if (config.linuxBranch.contains('master') &&
-            config.lttngBranch.contains('master')) {
-            LaunchJob(CraftJobName('vm_tests_fuzzing', config), config)
-        }
+// Get log of aborted runs.
+if (abortedRuns.size() > 0) {
+    println("Cancelled job(s):");
+    for (cancelledRun in abortedRuns) {
+        println("\t" + cancelledRun)
     }
-} else {
-    println("No new commit or tags, nothing more to do.")
+}
+
+// Mark this build as failed if at least one child build has failed, and as
+// aborted if there was no failure but at least one job was aborted.
+if (isFailed) {
+    build.setResult(hudson.model.Result.FAILURE)
+} else if (isAborted) {
+    build.setResult(hudson.model.Result.ABORTED)
 }