| 1 | /** |
| 2 | * Copyright (C) 2017 - Francis Deslauriers <francis.deslauriers@efficios.com> |
| 3 | * |
| 4 | * This program is free software: you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by |
| 6 | * the Free Software Foundation, either version 3 of the License, or |
| 7 | * (at your option) any later version. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | */ |
| 17 | |
| 18 | import hudson.console.HyperlinkNote |
| 19 | import hudson.model.* |
| 20 | import java.io.File |
| 21 | import org.eclipse.jgit.api.Git |
| 22 | import org.eclipse.jgit.lib.Ref |
| 23 | |
// Thrown when a kernel version string does not match the expected
// "vX.Y[.Z][.W][-rcN]" format understood by VanillaKVersion.
class InvalidKVersionException extends Exception {
  public InvalidKVersionException(String message) {
    super(message)
  }
}
| 29 | |
// Thrown when a null or empty string is passed where a kernel version
// string is expected.
class EmptyKVersionException extends Exception {
  public EmptyKVersionException(String message) {
    super(message)
  }
}
| 35 | |
// Models a vanilla (upstream) Linux kernel version string of the form
// "vX.Y[.Z][.W][-rcN]" and makes versions comparable, so the newest tag of
// a branch can be selected. Pre-3.0 kernels (v2.6.32.9, ...) use a fourth
// significant component, stored in 'majorB'.
class VanillaKVersion implements Comparable<VanillaKVersion> {

  Integer major = 0
  // Second component of pre-3.0 versions (the "6" in v2.6.32.x); 0 otherwise.
  Integer majorB = 0
  Integer minor = 0
  Integer patch = 0
  // Release-candidate number. MAX_VALUE means "not an RC", so that a final
  // release compares greater than all of its release candidates.
  Integer rc = Integer.MAX_VALUE
  // True when the version carries a patch level (vX.Y.Z), i.e. it is a
  // stable-branch release rather than a mainline release.
  Boolean inStable = false;

  VanillaKVersion() {}

  VanillaKVersion(version) {
    this.parse(version)
  }

  // Smallest possible version; compares lower than any real release.
  static VanillaKVersion minKVersion() {
    return new VanillaKVersion("v0.0.0")
  }

  // Largest possible non-RC version; compares higher than any real release.
  static VanillaKVersion maxKVersion() {
    return new VanillaKVersion("v" + Integer.MAX_VALUE + ".0.0")
  }

  static VanillaKVersion factory(version) {
    return new VanillaKVersion(version)
  }

  // Parse a version string ("v5.15.112", "v6.1-rc2", "v2.6.32.9", ...) into
  // this object's fields.
  //
  // Throws EmptyKVersionException on a null/empty string and
  // InvalidKVersionException when the string doesn't match the format.
  def parse(version) {
    // Reset every field so an instance can be reused to parse more than one
    // version string. 'inStable' must be reset too, otherwise a previous
    // stable parse would leak into a later non-stable one.
    this.major = 0
    this.majorB = 0
    this.minor = 0
    this.patch = 0
    this.rc = Integer.MAX_VALUE
    this.inStable = false

    if (!version) {
      throw new EmptyKVersionException("Empty kernel version")
    }

    def match = version =~ /^v(\d+)\.(\d+)(\.(\d+))?(\.(\d+))?(-rc(\d+))?$/
    if (!match) {
      throw new InvalidKVersionException("Invalid kernel version: ${version}")
    }

    Integer offset = 0;

    // Major. For pre-3.0 kernels the second component belongs to the major
    // version (v2.6.x), which shifts the minor/patch group indices by two.
    this.major = Integer.parseInt(match.group(1))
    if (this.major <= 2) {
      offset = 2
      this.majorB = Integer.parseInt(match.group(2))
    }

    // Minor
    if (match.group(2 + offset) != null) {
      this.minor = Integer.parseInt(match.group(2 + offset))
    }

    // Patch level; its presence marks a stable-branch release.
    if (match.group(4 + offset) != null) {
      this.patch = Integer.parseInt(match.group(4 + offset))
      this.inStable = true
    }

    // RC
    if (match.group(8) != null) {
      this.rc = Integer.parseInt(match.group(8))
    }
  }

  Boolean isInStableBranch() {
    return this.inStable
  }

  // Return true if both versions are of the same stable branch
  // (same major[.majorB].minor; the patch level is ignored).
  Boolean isSameStable(VanillaKVersion o) {
    // Note: Groovy's != on Integer compares values (equals), not identity.
    if (this.major != o.major) {
      return false
    }
    if (this.majorB != o.majorB) {
      return false
    }
    if (this.minor != o.minor) {
      return false
    }

    return true
  }

  // Lexicographic comparison on (major, majorB, minor, patch, rc). Final
  // releases sort after their RCs because 'rc' defaults to MAX_VALUE.
  @Override int compareTo(VanillaKVersion o) {
    if (this.major != o.major) {
      return Integer.compare(this.major, o.major)
    }
    if (this.majorB != o.majorB) {
      return Integer.compare(this.majorB, o.majorB)
    }
    if (this.minor != o.minor) {
      return Integer.compare(this.minor, o.minor)
    }
    if (this.patch != o.patch) {
      return Integer.compare(this.patch, o.patch)
    }
    if (this.rc != o.rc) {
      return Integer.compare(this.rc, o.rc)
    }

    // Same version
    return 0;
  }

  // Rebuild the canonical "vX[.B].Y[.Z][-rcN]" string, omitting the
  // components that are zero/absent.
  String toString() {
    String vString = "v${this.major}"

    if (this.majorB > 0) {
      vString = vString.concat(".${this.majorB}")
    }

    vString = vString.concat(".${this.minor}")

    if (this.patch > 0) {
      vString = vString.concat(".${this.patch}")
    }

    if (this.rc > 0 && this.rc < Integer.MAX_VALUE) {
      vString = vString.concat("-rc${this.rc}")
    }
    return vString
  }
}
| 164 | |
// Save the hashmap containing all the jobs and their status to disk. We can do
// that because this job is configured to always run on the master node on
// Jenkins.
//
// Failures are logged but deliberately not fatal: losing the cache only
// means the next run re-launches every job.
def SaveCurrentJobsToWorkspace = { currentJobs, ondiskpath ->
  try {
    // withObjectOutputStream creates the file if needed and guarantees the
    // stream is flushed and closed even when writeObject throws.
    new File(ondiskpath).withObjectOutputStream { out ->
      out.writeObject(currentJobs)
    }
  } catch (e) {
    println("Failed to save previous Git object IDs to disk." + e);
  }
}
| 179 | |
// Load the hashmap containing all the jobs and their last status from disk.
// It's possible because this job is configured to always run on the master
// node on Jenkins.
//
// Returns an empty map when the file is missing or unreadable (e.g. on the
// very first run), so callers always get a usable map.
def LoadPreviousJobsFromWorkspace = { ondiskpath ->
  def previousJobs = [:]
  try {
    // withObjectInputStream guarantees the stream is closed even when
    // readObject throws.
    new File(ondiskpath).withObjectInputStream { input ->
      previousJobs = input.readObject()
    }
  } catch (e) {
    println("Failed to load previous runs from disk." + e);
  }
  return previousJobs
}
| 195 | |
| 196 | |
// Query 'remoteRepo' via git ls-remote and return a map of
// branch name -> SHA-1 of the branch head, restricted to the branches
// listed in 'branchesOfInterest'.
def GetHeadCommits = { remoteRepo, branchesOfInterest ->
  def headsByBranch = [:]
  def headRefs = Git.lsRemoteRepository()
      .setTags(false)
      .setHeads(true)
      .setRemote(remoteRepo).call()

  for (ref in headRefs) {
    def branchName = ref.getName().replaceAll('refs/heads/', '')
    if (branchesOfInterest.contains(branchName)) {
      headsByBranch[branchName] = ref.getObjectId().name()
    }
  }

  return headsByBranch
}
| 212 | |
// Query 'remoteRepo' via git ls-remote and return a map of
// tag name -> SHA-1, skipping release-candidate tags.
def GetTagIds = { remoteRepo ->
  def tagsByName = [:]
  def tagRefs = Git.lsRemoteRepository()
      .setTags(true)
      .setHeads(false)
      .setRemote(remoteRepo).call()

  for (ref in tagRefs) {
    def refName = ref.getName()
    // Exclude release candidate tags
    if (!refName.contains('-rc')) {
      tagsByName[refName.replaceAll('refs/tags/', '')] = ref.getObjectId().name()
    }
  }

  return tagsByName
}
| 229 | |
// Return (as a string) the newest final-release tag in 'tagRefs' that
// belongs to 'branch': for a stable branch ('linux-5.15.y') the highest
// v5.15.z tag, for 'master' the highest mainline (non-stable) tag.
// Returns "v0.0.0" when no tag of the branch is present.
def GetLastTagOfBranch = { tagRefs, branch ->
  def candidates = tagRefs.collect { new VanillaKVersion(it.key) }
  def newest = new VanillaKVersion('v0.0.0');
  if (branch.contains('master')) {
    // Mainline: only consider tags without a stable patch level.
    candidates.each {
      if (!it.isInStableBranch() && newest < it) {
        newest = it;
      }
    }
  } else {
    // Stable branch: derive the target series from the branch name
    // ('linux-5.15.y' -> 'v5.15') and keep the highest tag of that series.
    def wanted = new VanillaKVersion(branch.replaceAll('linux-', 'v').replaceAll('.y', ''))
    candidates.each {
      if (it.isSameStable(wanted) && newest < it) {
        newest = it;
      }
    }
  }
  return newest.toString()
}
| 251 | |
// Returns the latest tags of each of the branches passed in the argument:
// a map of branch name -> SHA-1 of the branch's most recent final release
// tag (null when no matching tag exists on the remote).
def GetLastTagIds = { remoteRepo, branchesOfInterest ->
  def remoteTagRefs = GetTagIds(remoteRepo)
  def remoteLastTagCommit = [:]

  // Exclude legacy v2.x tags entirely; those branches are not tracked here.
  remoteTagRefs = remoteTagRefs.findAll { !it.key.contains("v2.") }
  branchesOfInterest.each {
    remoteLastTagCommit[it] = remoteTagRefs[GetLastTagOfBranch(remoteTagRefs, it)]
  }

  return remoteLastTagCommit
}
| 265 | |
// Build the canonical downstream job name for a job type / kernel branch /
// LTTng branch combination, e.g. "vm_tests_kmaster_lstable-2.13".
def CraftJobName = { jobType, linuxBranch, lttngBranch ->
  return jobType + '_k' + linuxBranch + '_l' + lttngBranch
}
| 269 | |
// Schedule a downstream build of 'jobName', seeding it with the job's
// default parameter values plus the commit/tag IDs from jobInfo['config'].
// Returns the QueueTaskFuture of the scheduled build, or null when the job
// could not be scheduled (e.g. deactivated).
def LaunchJob = { jobName, jobInfo ->
  def job = Hudson.instance.getJob(jobName)
  def params = []
  job.getProperty(ParametersDefinitionProperty.class).getParameterDefinitions().each { paramdef ->
    // If there is a default value for this parameter, use it. Don't use empty
    // default value parameters.
    def defaultValue = paramdef.getDefaultParameterValue()
    if (defaultValue != null) {
      params.add(defaultValue)
    }
  }

  def config = jobInfo['config']
  params.add(new StringParameterValue('LTTNG_TOOLS_COMMIT_ID', config['toolsCommit']))
  params.add(new StringParameterValue('LTTNG_MODULES_COMMIT_ID', config['modulesCommit']))
  params.add(new StringParameterValue('LTTNG_UST_COMMIT_ID', config['ustCommit']))
  params.add(new StringParameterValue('KERNEL_TAG_ID', config['linuxTagID']))

  def currBuild = job.scheduleBuild2(0, new Cause.UpstreamCause(build), new ParametersAction(params))

  if (currBuild != null) {
    println("Launching job: ${HyperlinkNote.encodeTo('/' + job.url, job.fullDisplayName)}");
  } else {
    println("Job ${jobName} not found or deactivated.");
  }

  return currBuild
}
| 295 | |
// Git URLs of the components under test.
final String toolsRepo = "https://github.com/lttng/lttng-tools.git"
final String modulesRepo = "https://github.com/lttng/lttng-modules.git"
final String ustRepo = "https://github.com/lttng/lttng-ust.git"
final String linuxRepo = "git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git"

// On-disk location (inside this job's workspace) of the serialized results
// of the previous trigger run.
final String pastJobsPath = build.getEnvironment(listener).get('WORKSPACE') + "/pastjobs";

// Currently-maintained branches: tested in every (lttng, linux) combination.
def recentLttngBranchesOfInterest = ['master',
                                     'stable-2.13',
                                     'stable-2.12']
def recentLinuxBranchesOfInterest = ['master',
                                     'linux-6.1.y',
                                     'linux-5.15.y',
                                     'linux-5.10.y',
                                     'linux-5.4.y',
                                     'linux-4.19.y',
                                     'linux-4.14.y',
                                    ]

// Legacy branches: legacy LTTng branches are only paired with legacy
// kernel branches (the cross product below is separate).
def legacyLttngBranchesOfInterest = []
def legacyLinuxBranchesOfInterest = [
                                     'linux-5.14.y',
                                     'linux-4.18.y',
                                     'linux-4.12.y',
                                     'linux-4.9.y',
                                    ]

// Kernel branches tested only by the vm_tests trigger (added later, see
// the trigger-type detection below).
def vmLinuxBranchesOfInterest = []
| 324 | |
// Generate configurations of interest: a set of [lttngBranch, linuxBranch]
// pairs. Using a Set keeps the combinations unique.
def configurationOfInterest = [] as Set

// Every recent LTTng branch is paired with every recent kernel branch.
for (lttngBranch in recentLttngBranchesOfInterest) {
  for (linuxBranch in recentLinuxBranchesOfInterest) {
    configurationOfInterest.add([lttngBranch, linuxBranch])
  }
}

// Legacy LTTng branches are only paired with legacy kernel branches.
for (lttngBranch in legacyLttngBranchesOfInterest) {
  for (linuxBranch in legacyLinuxBranchesOfInterest) {
    configurationOfInterest.add([lttngBranch, linuxBranch])
  }
}

// Flat lists of every branch we must poll, regardless of pairing.
def lttngBranchesOfInterest = recentLttngBranchesOfInterest + legacyLttngBranchesOfInterest
def linuxBranchesOfInterest = recentLinuxBranchesOfInterest + legacyLinuxBranchesOfInterest + vmLinuxBranchesOfInterest
| 342 | |
// For LTTng branches, we look for new commits.
// Each map: branch name -> SHA-1 of the branch head.
def toolsHeadCommits = GetHeadCommits(toolsRepo, lttngBranchesOfInterest)
def modulesHeadCommits = GetHeadCommits(modulesRepo, lttngBranchesOfInterest)
def ustHeadCommits = GetHeadCommits(ustRepo, lttngBranchesOfInterest)

// For Linux branches, we look for new non-RC tags.
// Map: branch name -> SHA-1 of the branch's most recent final release tag.
def linuxLastTagIds = GetLastTagIds(linuxRepo, linuxBranchesOfInterest)
| 350 | |
// Assemble the bookkeeping entry for one (kernel, lttng) branch pair: the
// input commit/tag IDs under 'config', plus status/build placeholders.
// Keys and values are plain strings so the map serializes cleanly.
def CraftConfig = { linuxBr, lttngBr ->
  def job = [
    'config': [
      'linuxBranch': linuxBr,
      'lttngBranch': lttngBr,
      'linuxTagID': linuxLastTagIds[linuxBr],
      'toolsCommit': toolsHeadCommits[lttngBr],
      'modulesCommit': modulesHeadCommits[lttngBr],
      'ustCommit': ustHeadCommits[lttngBr]
    ],
    'status': 'NOT_SET',
    'build': null
  ];
  return job;
}
| 364 | |
// Check what type of jobs should be triggered, based on the name of the
// job this script runs in.
// NOTE(review): if the trigger job's name contains neither "vm_tests" nor
// "baremetal_tests", 'jobType' is never bound and the script will fail
// later when crafting job names — confirm all trigger jobs follow this
// naming convention.
triggerJobName = build.project.getFullDisplayName();
if (triggerJobName.contains("vm_tests")) {
  jobType = 'vm_tests';
  // vm_tests additionally covers the VM-only kernel branches.
  recentLttngBranchesOfInterest.each { lttngBranch ->
    vmLinuxBranchesOfInterest.each { linuxBranch ->
      configurationOfInterest.add([lttngBranch, linuxBranch])
    }
  }
} else if (triggerJobName.contains("baremetal_tests")) {
  jobType = 'baremetal_tests';
}
| 377 | |
// Hashmap containing all the jobs, their configuration (commit id, etc. )and
// their status (SUCCEEDED, FAILED, etc.). This Hashmap is made of basic strings
// rather than objects and enums because strings are easily serializable.
def currentJobs = [:];

// Get an up to date view of all the branches of interest.
// Each entry maps a job name to the config produced by CraftConfig.
configurationOfInterest.each { lttngBr, linuxBr ->
  def jobName = CraftJobName(jobType, linuxBr, lttngBr);
  currentJobs[jobName] = CraftConfig(linuxBr, lttngBr);
}
| 388 | |
// Add canary job: fixed, known-good commit IDs that should always build.
// A canary failure therefore points to an infrastructure problem rather
// than a code regression.
def jobNameCanary = jobType + "_kcanary_lcanary";
currentJobs[jobNameCanary] = [
  'config': [
    'linuxBranch': 'v5.15.112',
    'lttngBranch': 'v2.13.9',
    'linuxTagID': '9d6bde853685609a631871d7c12be94fdf8d912e', // v5.15.112
    'toolsCommit': '2ff0385718ff894b3d0e06f3961334c20c5436f8', // v2.13.9
    'modulesCommit': 'da1f5a264fff33fc5a9518e519fb0084bf1074af', // v2.13.9
    'ustCommit': 'de624c20694f69702b42c5d47b5bcf692293a238' // v2.13.5
  ],
  'status': 'NOT_SET',
  'build': null
];
| 401 | |
// Results of the previous trigger run; empty map on the very first run.
def pastJobs = LoadPreviousJobsFromWorkspace(pastJobsPath);

// Names of child jobs that failed or were aborted, for the final report.
def failedRuns = []
def abortedRuns = []
// Overall outcome flags for this umbrella build.
def isFailed = false
def isAborted = false
// Number of child builds still pending or running.
def ongoingJobs = 0;
| 409 | |
// Decide, for every configuration, whether a new child build is needed,
// and launch it. A job is skipped when its inputs (commit/tag IDs) are
// identical to the previous run's, unless FORCE_JOB_RUN is set; the canary
// job always runs.
currentJobs.each { jobName, jobInfo ->
  // If the job ran in the past, we check if the IDs changed since.
  // Fetch past results only if the job is not of type canary.
  if (!jobName.contains('_kcanary_lcanary') && pastJobs.containsKey(jobName) &&
      build.getBuildVariables().get('FORCE_JOB_RUN') == 'false') {
    pastJob = pastJobs[jobName];

    // If the code has not changed report previous status.
    if (pastJob['config'] == jobInfo['config']) {
      // if the config has not changed, we keep it.
      // if it's failed, we don't launch a new job and keep it failed.
      jobInfo['status'] = pastJob['status'];
      if (pastJob['status'] == 'FAILED' &&
          build.getBuildVariables().get('FORCE_FAILED_JOB_RUN') == 'false') {
        println("${jobName} has not changed since the last failed run. Don't run it again.");
        // Mark the umbrella job for failure but still run the jobs that
        // changed since the last run.
        isFailed = true;
        return;
      } else if (pastJob['status'] == 'ABORTED') {
        println("${jobName} has not changed since last aborted run. Run it again.");
      } else if (pastJob['status'] == 'SUCCEEDED') {
        println("${jobName} has not changed since the last successful run. Don't run it again.");
        return;
      }
    }
  }

  jobInfo['status'] = 'PENDING';
  jobInfo['build'] = LaunchJob(jobName, jobInfo);
  ongoingJobs += 1;
}
| 442 | |
// Poll the scheduled builds until all of them have completed or been
// cancelled, recording each job's final status as results come in.
while (ongoingJobs > 0) {
  currentJobs.each { jobName, jobInfo ->

    // Only jobs launched in this run (status PENDING) are tracked here.
    if (jobInfo['status'] != 'PENDING') {
      return;
    }

    jobBuild = jobInfo['build']

    // The isCancelled() method checks if the run was cancelled before
    // execution. We consider such run as being aborted.
    if (jobBuild.isCancelled()) {
      println("${jobName} was cancelled before launch.")
      isAborted = true;
      abortedRuns.add(jobName);
      ongoingJobs -= 1;
      jobInfo['status'] = 'ABORTED'
      // Invalidate the build field, as it's not serializable and we don't need
      // it anymore.
      jobInfo['build'] = null;
    } else if (jobBuild.isDone()) {

      // get() returns the completed downstream run.
      jobExitStatus = jobBuild.get();

      // Invalidate the build field, as it's not serializable and we don't need
      // it anymore.
      jobInfo['build'] = null;
      println("${jobExitStatus.fullDisplayName} completed with status ${jobExitStatus.result}.");

      // If the job didn't succeed, add its name to the right list so it can
      // be printed at the end of the execution.
      ongoingJobs -= 1;
      switch (jobExitStatus.result) {
      case Result.ABORTED:
        isAborted = true;
        abortedRuns.add(jobName);
        jobInfo['status'] = 'ABORTED'
        break;
      case Result.FAILURE:
        isFailed = true;
        failedRuns.add(jobName);
        jobInfo['status'] = 'FAILED'
        break;
      case Result.SUCCESS:
        jobInfo['status'] = 'SUCCEEDED'
        break;
      default:
        break;
      }
    }
  }

  // Sleep before the next iteration.
  try {
    Thread.sleep(30000)
  } catch(e) {
    // A user abort of this umbrella job interrupts the sleep: mark the
    // build aborted and propagate; rethrow anything else untouched.
    if (e in InterruptedException) {
      build.setResult(hudson.model.Result.ABORTED)
      throw new InterruptedException()
    } else {
      throw(e)
    }
  }
}
| 507 | |
// All jobs are done running. Persist their exit status for the next run.
SaveCurrentJobsToWorkspace(currentJobs, pastJobsPath);

// Report the failed runs.
if (!failedRuns.isEmpty()) {
  println("Failed job(s):");
  failedRuns.each { failedRun ->
    println("\t" + failedRun)
  }
}

// Report the aborted runs.
if (!abortedRuns.isEmpty()) {
  println("Cancelled job(s):");
  abortedRuns.each { cancelledRun ->
    println("\t" + cancelledRun)
  }
}

// Mark this build as failed if at least one child build failed; otherwise
// mark it as aborted if at least one child was aborted.
if (isFailed) {
  build.setResult(hudson.model.Result.FAILURE)
} else if (isAborted) {
  build.setResult(hudson.model.Result.ABORTED)
}