pkg/test/ginkgo/cmd_runsuite.go (70 changes: 43 additions & 27 deletions)
@@ -11,7 +11,6 @@ import (
     "path/filepath"
     "sort"
     "strings"
-    "sync"
     "syscall"
     "time"
 
@@ -135,8 +134,6 @@ func max(a, b int) int {
 }
 
 func (opt *Options) Run(suite *TestSuite, junitSuiteName string) error {
-    ctx := context.Background()
-
     if len(opt.Regex) > 0 {
         if err := filterWithRegex(suite, opt.Regex); err != nil {
             return err
@@ -186,6 +183,22 @@ func (opt *Options) Run(suite *TestSuite, junitSuiteName string) error {
         }
     }
 
+    // This ensures that tests in the identified paths do not run in parallel, because
+    // the test suite reuses shared resources without considering whether another test
+    // could be running at the same time. While these are technically [Serial], ginkgo
+    // parallel mode provides this guarantee. Doing this for all suites would be too
+    // slow.
+    setTestExclusion(tests, func(suitePath string, t *testCase) bool {
+        for _, name := range []string{
+            "/k8s.io/kubernetes/test/e2e/apps/disruption.go",
+        } {
+            if strings.HasSuffix(suitePath, name) {
+                return true
+            }
+        }
+        return false
+    })
+
     tests = suite.Filter(tests)
     if len(tests) == 0 {
         return fmt.Errorf("suite %q does not contain any tests", suite.Name)
@@ -201,18 +214,9 @@ func (opt *Options) Run(suite *TestSuite, junitSuiteName string) error {
         opt.StartTime = start
     }
 
-    timeout := opt.Timeout
-    if timeout == 0 {
-        timeout = suite.TestTimeout
-    }
-    if timeout == 0 {
-        timeout = 15 * time.Minute
-    }
-
-    testRunnerContext := newCommandContext(opt.AsEnv(), timeout)
-
     if opt.PrintCommands {
-        newParallelTestQueue(testRunnerContext).OutputCommands(ctx, tests, opt.Out)
+        status := newTestStatus(opt.Out, true, len(tests), time.Minute, monitor.NewNoOpMonitor(), opt.AsEnv())
+        newParallelTestQueue().Execute(context.Background(), tests, 1, status.OutputCommand)
         return nil
     }
     if opt.DryRun {
@@ -240,6 +244,13 @@ func (opt *Options) Run(suite *TestSuite, junitSuiteName string) error {
     if parallelism == 0 {
         parallelism = 10
     }
+    timeout := opt.Timeout
+    if timeout == 0 {
+        timeout = suite.TestTimeout
+    }
+    if timeout == 0 {
+        timeout = 15 * time.Minute
+    }
 
     ctx, cancelFn := context.WithCancel(context.Background())
     defer cancelFn()
@@ -281,8 +292,6 @@ func (opt *Options) Run(suite *TestSuite, junitSuiteName string) error {
     if len(tests) == 1 && count == 1 {
         includeSuccess = true
     }
-    testOutputLock := &sync.Mutex{}
-    testOutputConfig := newTestOutputConfig(testOutputLock, opt.Out, monitorEventRecorder, includeSuccess)
 
     early, notEarly := splitTests(tests, func(t *testCase) bool {
         return strings.Contains(t.name, "[Early]")
@@ -321,17 +330,23 @@ func (opt *Options) Run(suite *TestSuite, junitSuiteName string) error {
     }
     expectedTestCount += len(openshiftTests) + len(kubeTests) + len(storageTests) + len(mustGatherTests)
 
-    abortFn := neverAbort
+    status := newTestStatus(opt.Out, includeSuccess, expectedTestCount, timeout, monitorEventRecorder, opt.AsEnv())
     testCtx := ctx
     if opt.FailFast {
-        abortFn, testCtx = abortOnFailure(ctx)
+        var cancelFn context.CancelFunc
+        testCtx, cancelFn = context.WithCancel(testCtx)
+        status.AfterTest(func(t *testCase) {
+            if t.failed {
+                cancelFn()
+            }
+        })
     }
 
     tests = nil
 
     // run our Early tests
-    q := newParallelTestQueue(testRunnerContext)
-    q.Execute(testCtx, early, parallelism, testOutputConfig, abortFn)
+    q := newParallelTestQueue()
+    q.Execute(testCtx, early, parallelism, status.Run)
     tests = append(tests, early...)
 
     // TODO: will move to the monitor
@@ -341,29 +356,29 @@ func (opt *Options) Run(suite *TestSuite, junitSuiteName string) error {
     // we loop indefinitely.
     for i := 0; (i < 1 || count == -1) && testCtx.Err() == nil; i++ {
         kubeTestsCopy := copyTests(kubeTests)
-        q.Execute(testCtx, kubeTestsCopy, parallelism, testOutputConfig, abortFn)
+        q.Execute(testCtx, kubeTestsCopy, parallelism, status.Run)
         tests = append(tests, kubeTestsCopy...)
 
         // I thought about randomizing the order of the kube, storage, and openshift tests, but storage dominates our e2e runs, so it doesn't help much.
         storageTestsCopy := copyTests(storageTests)
-        q.Execute(testCtx, storageTestsCopy, max(1, parallelism/2), testOutputConfig, abortFn) // storage tests only run at half the parallelism, so we can avoid cloud provider quota problems.
+        q.Execute(testCtx, storageTestsCopy, max(1, parallelism/2), status.Run) // storage tests only run at half the parallelism, so we can avoid cloud provider quota problems.
         tests = append(tests, storageTestsCopy...)
 
         openshiftTestsCopy := copyTests(openshiftTests)
-        q.Execute(testCtx, openshiftTestsCopy, parallelism, testOutputConfig, abortFn)
+        q.Execute(testCtx, openshiftTestsCopy, parallelism, status.Run)
         tests = append(tests, openshiftTestsCopy...)
 
         // run the must-gather tests after parallel tests to reduce resource contention
         mustGatherTestsCopy := copyTests(mustGatherTests)
-        q.Execute(testCtx, mustGatherTestsCopy, parallelism, testOutputConfig, abortFn)
+        q.Execute(testCtx, mustGatherTestsCopy, parallelism, status.Run)
        tests = append(tests, mustGatherTestsCopy...)
     }
 
     // TODO: will move to the monitor
     pc.SetEvents([]string{postUpgradeEvent})
 
     // run Late test suits after everything else
-    q.Execute(testCtx, late, parallelism, testOutputConfig, abortFn)
+    q.Execute(testCtx, late, parallelism, status.Run)
     tests = append(tests, late...)
 
     // TODO: will move to the monitor
@@ -406,8 +421,9 @@ func (opt *Options) Run(suite *TestSuite, junitSuiteName string) error {
         }
     }
 
-    q := newParallelTestQueue(testRunnerContext)
-    q.Execute(testCtx, retries, parallelism, testOutputConfig, abortFn)
+    q := newParallelTestQueue()
+    status := newTestStatus(ioutil.Discard, opt.IncludeSuccessOutput, len(retries), timeout, monitorEventRecorder, opt.AsEnv())
+    q.Execute(testCtx, retries, parallelism, status.Run)
     var flaky []string
     var repeatFailures []*testCase
     for _, test := range retries {
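For context on the FailFast change above: the old code threaded an abortFn from abortOnFailure through every queue call, while the new code derives a cancellable context and registers a status.AfterTest hook that cancels it on the first failure, so the queue simply stops scheduling work once testCtx is done. Below is a minimal standalone sketch of that cancel-on-first-failure pattern using only the standard library; the result type, runAll, and afterTest here are simplified stand-ins for illustration, not the repo's real testCase, queue, or status APIs.

package main

import (
    "context"
    "fmt"
    "sync"
)

// result is a simplified stand-in for the suite's testCase type.
type result struct {
    name   string
    failed bool
}

// runAll executes tests with bounded parallelism and stops scheduling new work
// once ctx is cancelled, mirroring how the queue honors testCtx in the diff.
func runAll(ctx context.Context, tests []result, parallelism int, run func(context.Context, result)) {
    sem := make(chan struct{}, parallelism)
    var wg sync.WaitGroup
    for _, t := range tests {
        if ctx.Err() != nil {
            break // fail-fast: an earlier failure cancelled the shared context
        }
        wg.Add(1)
        sem <- struct{}{}
        go func(t result) {
            defer wg.Done()
            defer func() { <-sem }()
            run(ctx, t)
        }(t)
    }
    wg.Wait()
}

func main() {
    tests := []result{{name: "a"}, {name: "b", failed: true}, {name: "c"}}

    testCtx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // afterTest plays the role of status.AfterTest: observe each finished test
    // and cancel the shared context on the first failure.
    afterTest := func(t result) {
        if t.failed {
            cancel()
        }
    }

    runAll(testCtx, tests, 2, func(ctx context.Context, t result) {
        fmt.Println("ran", t.name, "failed:", t.failed)
        afterTest(t)
    })
}

As in the diff, cancellation is best-effort: tests already dispatched keep running, but no new work starts after the first failure is observed.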
pkg/test/ginkgo/group_filter.go (2 changes: 1 addition & 1 deletion)
@@ -48,7 +48,7 @@ func (agf *apiGroupFilter) markSkippedWhenAPIGroupNotServed(tests []*testCase) {
         if !agf.apiGroups.HasAll(test.apigroups...) {
             missingAPIGroups := sets.NewString(test.apigroups...).Difference(agf.apiGroups)
             test.skipped = true
-            test.testOutputBytes = []byte(fmt.Sprintf("skipped because the following required API groups are missing: %v", strings.Join(missingAPIGroups.List(), ",")))
+            test.out = []byte(fmt.Sprintf("skipped because the following required API groups are missing: %v", strings.Join(missingAPIGroups.List(), ",")))
             continue
         }
     }
pkg/test/ginkgo/junit.go (12 changes: 6 additions & 6 deletions)
@@ -37,21 +37,21 @@ func generateJUnitTestSuiteResults(
         s.NumSkipped++
         s.TestCases = append(s.TestCases, &junitapi.JUnitTestCase{
             Name: test.name,
-            SystemOut: string(test.testOutputBytes),
+            SystemOut: string(test.out),
             Duration: test.duration.Seconds(),
             SkipMessage: &junitapi.SkipMessage{
-                Message: lastLinesUntil(string(test.testOutputBytes), 100, "skip ["),
+                Message: lastLinesUntil(string(test.out), 100, "skip ["),
             },
         })
     case test.failed:
         s.NumTests++
         s.NumFailed++
         s.TestCases = append(s.TestCases, &junitapi.JUnitTestCase{
             Name: test.name,
-            SystemOut: string(test.testOutputBytes),
+            SystemOut: string(test.out),
             Duration: test.duration.Seconds(),
             FailureOutput: &junitapi.FailureOutput{
-                Output: lastLinesUntil(string(test.testOutputBytes), 100, "fail ["),
+                Output: lastLinesUntil(string(test.out), 100, "fail ["),
             },
         })
     case test.success:
@@ -60,10 +60,10 @@ func generateJUnitTestSuiteResults(
            s.NumFailed++
            s.TestCases = append(s.TestCases, &junitapi.JUnitTestCase{
                Name: test.name,
-                SystemOut: string(test.testOutputBytes),
+                SystemOut: string(test.out),
                Duration: test.duration.Seconds(),
                FailureOutput: &junitapi.FailureOutput{
-                    Output: lastLinesUntil(string(test.testOutputBytes), 100, "flake:"),
+                    Output: lastLinesUntil(string(test.out), 100, "flake:"),
                },
            })
        }
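The junit.go changes keep routing the renamed test.out buffer through lastLinesUntil with markers such as "skip [", "fail [", and "flake:"; that helper's implementation is not part of this diff. Purely as an illustrative assumption about its shape (the real function may behave differently), a helper like this could trim captured output to roughly the last N lines while making sure the marker line is still included; lastLinesUntilSketch and containsPrefixedLine are hypothetical names, not the repo's code.

package main

import (
    "fmt"
    "strings"
)

// lastLinesUntilSketch is an illustrative approximation: it returns at least
// the last maxLines lines of s, extending the window backwards until it
// contains a line that starts with one of the given prefixes (or the start of
// the output is reached), so the interesting "fail [" / "skip [" line is kept.
func lastLinesUntilSketch(s string, maxLines int, prefixes ...string) string {
    lines := strings.Split(strings.TrimRight(s, "\n"), "\n")
    start := len(lines) - maxLines
    if start < 0 {
        start = 0
    }
    for ; start > 0; start-- {
        if containsPrefixedLine(lines[start:], prefixes) {
            break
        }
    }
    return strings.Join(lines[start:], "\n")
}

// containsPrefixedLine reports whether any line, after trimming whitespace,
// starts with one of the prefixes.
func containsPrefixedLine(lines []string, prefixes []string) bool {
    for _, line := range lines {
        trimmed := strings.TrimSpace(line)
        for _, p := range prefixes {
            if strings.HasPrefix(trimmed, p) {
                return true
            }
        }
    }
    return false
}

func main() {
    out := "step 1\nstep 2\nfail [pkg/test]: assertion broke\nstack line 1\nstack line 2\n"
    // Even though only the last 2 lines are requested, the window grows to
    // include the "fail [" line.
    fmt.Println(lastLinesUntilSketch(out, 2, "fail ["))
}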