func run()

in kubetest/e2e.go [50:333]

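run drives one full kubetest cycle against the given deployer: it configures kubectl and the test environment, optionally brings the cluster up, runs the selected test phases (upgrade, skew, node, kubemark, ginkgo, or a custom command), dumps logs, checks for leaked resources, tears the cluster down, and returns an aggregate error covering every failed step.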

func run(deploy deployer, o options) error {
	cmd, err := deploy.KubectlCommand()
	if err != nil {
		return err
	}
	if cmd == nil {
		cmd = exec.Command("./cluster/kubectl.sh")
	}
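	// --match-server-version makes kubectl refuse to talk to a server whose
	// version differs from the client's, surfacing version skew early.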
	if o.checkSkew {
		cmd.Args = append(cmd.Args, "--match-server-version")
	}
	os.Setenv("KUBECTL", strings.Join(cmd.Args, " "))

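	// config-test.sh selects the e2e test configuration for the cluster/ scripts.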
	os.Setenv("KUBE_CONFIG_FILE", "config-test.sh")
	os.Setenv("KUBE_RUNTIME_CONFIG", o.runtimeConfig)

	var errs []error

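	// Resolve the --dump and --dump-pre-test-logs directories to absolute
	// paths; an empty flag stays empty (see util.OptionalAbsPath).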
	dump, err := util.OptionalAbsPath(o.dump)
	if err != nil {
		return fmt.Errorf("failed handling --dump path: %w", err)
	}

	dumpPreTestLogs, err := util.OptionalAbsPath(o.dumpPreTestLogs)
	if err != nil {
		return fmt.Errorf("failed handling --dump-pre-test-logs path: %w", err)
	}

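	// A fresh --up first tears down any cluster left over from a previous run.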
	if o.up {
		if err := control.XMLWrap(&suite, "TearDown Previous", deploy.Down); err != nil {
			return fmt.Errorf("error tearing down previous cluster: %s", err)
		}
	}

	// Ensures that the cleanup/down action is performed exactly once.
	downDone := false

	var (
		beforeResources []byte
		upResources     []byte
		downResources   []byte
		afterResources  []byte
	)

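	// With --check-leaked-resources, snapshot the cloud resources at four
	// points (before up, after up, after down, and after a settling delay);
	// diffResources compares the snapshots at the end of the run.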
	if o.checkLeaks {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "listResources Before", func() error {
			beforeResources, err = listResources()
			return err
		}))
	}

	if o.up {
		// If we tried to bring the cluster up, make a courtesy
		// attempt to bring it down so we're not leaving resources around.
		if o.down {
			defer control.XMLWrap(&suite, "Deferred TearDown", func() error {
				if !downDone {
					return deploy.Down()
				}
				return nil
			})
		}
		// Start the cluster using this version.
		if err := control.XMLWrap(&suite, "Up", deploy.Up); err != nil {
			if dump != "" {
				control.XMLWrap(&suite, "DumpClusterLogs (--up failed)", func() error {
					// This frequently means the cluster does not exist.
					// Thus DumpClusterLogs() typically fails.
					// Therefore always return nil in this scenario.
					// TODO(fejta): report a green E in testgrid if it errors.
					deploy.DumpClusterLogs(dump, o.logexporterGCSPath)
					return nil
				})
			}
			return fmt.Errorf("starting e2e cluster: %s", err)
		}
		// Unless node tests are being run, check that the API is reachable before
		// proceeding with further steps. This is accomplished by listing the nodes.
		if !o.nodeTests && !strings.EqualFold(string(o.build), "none") {
			errs = util.AppendError(errs, control.XMLWrap(&suite, "Check APIReachability", func() error { return getKubectlVersion(deploy) }))
			if dump != "" {
				errs = util.AppendError(errs, control.XMLWrap(&suite, "list nodes", func() error {
					return listNodes(deploy, dump)
				}))
			}
		}
	}

	if o.checkLeaks {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "listResources Up", func() error {
			upResources, err = listResources()
			return err
		}))
	}

	if o.upgradeArgs != "" {
		if err := control.XMLWrap(&suite, "test setup", deploy.TestSetup); err != nil {
			errs = util.AppendError(errs, err)
		} else {
			errs = util.AppendError(errs, control.XMLWrap(&suite, "UpgradeTest", func() error {
				// upgrade tests really only run one spec
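				// Strip GINKGO_PARALLEL* from the environment so that
				// one spec runs serially.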
				var env []string
				for _, v := range os.Environ() {
					if !strings.HasPrefix(v, "GINKGO_PARALLEL") {
						env = append(env, v)
					}
				}
				return skewTestEnv(env, argFields(o.upgradeArgs, dump, o.clusterIPRange), "upgrade", o.checkSkew)
			}))
		}
	}

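	// Optionally capture cluster logs before the main tests run, so a failure
	// during testing cannot take the pre-test evidence down with it.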
	if dumpPreTestLogs != "" {
		errs = append(errs, dumpRemoteLogs(deploy, o, dumpPreTestLogs, "pre-test")...)
	}

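	// argFields splits the --test_args string into an argument list, folding
	// in the dump directory and cluster IP range where applicable.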
	testArgs := argFields(o.testArgs, dump, o.clusterIPRange)
	if o.test {
		if err := control.XMLWrap(&suite, "test setup", deploy.TestSetup); err != nil {
			errs = util.AppendError(errs, err)
		} else {
			if o.preTestCmd != "" {
				errs = util.AppendError(errs, control.XMLWrap(&suite, "pre-test command", func() error {
					cmdLineTokenized := strings.Fields(os.ExpandEnv(o.preTestCmd))
					return control.FinishRunning(exec.Command(cmdLineTokenized[0], cmdLineTokenized[1:]...))
				}))
			}
			if o.nodeTests {
				nodeArgs := strings.Fields(o.nodeArgs)
				errs = util.AppendError(errs, control.XMLWrap(&suite, "Node Tests", func() error {
					return nodeTest(nodeArgs, o.testArgs, o.nodeTestArgs, o.gcpProject, o.gcpZone, o.runtimeConfig)
				}))
			} else if err := control.XMLWrap(&suite, "IsUp", deploy.IsUp); err != nil {
				errs = util.AppendError(errs, err)
			} else {
				if o.deployment != "conformance" {
					errs = util.AppendError(errs, control.XMLWrap(&suite, "kubectl version", func() error { return getKubectlVersion(deploy) }))
				}

				if o.skew {
					errs = util.AppendError(errs, control.XMLWrap(&suite, "SkewTest", func() error {
						return skewTest(testArgs, "skew", o.checkSkew)
					}))
				} else {
					var tester e2e.Tester = &GinkgoScriptTester{}
					if testBuilder, ok := deploy.(e2e.TestBuilder); ok {
						tester, err = testBuilder.BuildTester(toBuildTesterOptions(&o))
						errs = util.AppendError(errs, err)
					}
					if tester != nil {
						errs = util.AppendError(errs, control.XMLWrap(&suite, "Test", func() error {
							return tester.Run(control, testArgs)
						}))
					}
				}
			}
		}
	}

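	// Kubemark runs scalability tests against a cluster of hollow nodes.
	// kubemarkUp's error is tracked separately so later steps can tell a
	// kubemark bring-up failure apart from a test failure.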
	var kubemarkUpErr error
	if o.kubemark {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "Kubemark Overall", func() error {
			if kubemarkUpErr = kubemarkUp(dump, o, deploy); kubemarkUpErr != nil {
				return kubemarkUpErr
			}
			// When tests run via clusterloader or another custom command
			// (o.testCmd), skip the ginkgo call.
			if o.testCmd != "" {
				return nil
			}
			return kubemarkGinkgoTest(testArgs, dump)
		}))
	}

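	// A custom test command runs only when kubemark came up cleanly (or was
	// not requested, leaving kubemarkUpErr nil).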
	if kubemarkUpErr == nil && o.testCmd != "" {
		if err := control.XMLWrap(&suite, "test setup", deploy.TestSetup); err != nil {
			errs = util.AppendError(errs, err)
		} else {
			if o.preTestCmd != "" {
				errs = util.AppendError(errs, control.XMLWrap(&suite, "pre-test command", func() error {
					cmdLineTokenized := strings.Fields(os.ExpandEnv(o.preTestCmd))
					return control.FinishRunning(exec.Command(cmdLineTokenized[0], cmdLineTokenized[1:]...))
				}))
			}
			errs = util.AppendError(errs, control.XMLWrap(&suite, o.testCmdName, func() error {
				cmdLine := os.ExpandEnv(o.testCmd)
				return control.FinishRunning(exec.Command(cmdLine, o.testCmdArgs...))
			}))
		}
	}

	// TODO: consider remapping charts, etc to testCmd

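	// Tear kubemark down in the background so it overlaps with chart tests,
	// log dumps, and the main cluster teardown; kubemarkWg.Wait() below joins it.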
	var kubemarkWg sync.WaitGroup
	var kubemarkDownErr error
	if o.down && o.kubemark {
		kubemarkWg.Add(1)
		go kubemarkDown(&kubemarkDownErr, &kubemarkWg, o.provider, dump, o.logexporterGCSPath)
	}

	if o.charts {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "Helm Charts", chartsTest))
	}

	if dump != "" {
		errs = append(errs, dumpRemoteLogs(deploy, o, dump, "")...)
	}

	if o.checkLeaks {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "listResources Down", func() error {
			downResources, err = listResources()
			return err
		}))
	}

	if o.down {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "TearDown", func() error {
			if !downDone {
				err := deploy.Down()
				if err != nil {
					return err
				}
				downDone = true
			}
			return nil
		}))
	}

	// Wait for the kubemarkDown step to finish before going further.
	kubemarkWg.Wait()
	errs = util.AppendError(errs, kubemarkDownErr)

	// Save the cluster state if we upped a new cluster and either left it
	// running (!o.down) or used a real deployer (deployment != "none").
	if o.save != "" && ((!o.down && o.up) || (o.up && o.deployment != "none")) {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "Save Cluster State", func() error {
			return saveState(o.save)
		}))
	}

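	// Final leak check: take the last snapshot, then diff the four snapshots
	// collected over the run.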
	if o.checkLeaks {
		log.Print("Sleeping for 30 seconds...") // Wait for eventually consistent listing
		time.Sleep(30 * time.Second)
		if err := control.XMLWrap(&suite, "listResources After", func() error {
			afterResources, err = listResources()
			return err
		}); err != nil {
			errs = append(errs, err)
		} else {
			errs = util.AppendError(errs, control.XMLWrap(&suite, "diffResources", func() error {
				return diffResources(beforeResources, upResources, downResources, afterResources, dump)
			}))
		}
	}
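	// Publishing only happens on a fully green run: first let deployers that
	// implement the publisher interface publish, then optionally write the
	// built version to the --publish GCS path.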
	if len(errs) == 0 {
		if pub, ok := deploy.(publisher); ok {
			errs = util.AppendError(errs, pub.Publish())
		}
	}
	if len(errs) == 0 && o.publish != "" {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "Publish version", func() error {
			// Use plaintext version file packaged with kubernetes.tar.gz
			v, err := ioutil.ReadFile("version")
			if err != nil {
				return err
			}
			log.Printf("Set %s version to %s", o.publish, string(v))
			return gcsWrite(o.publish, v)
		}))
	}

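	// The post-test hook mirrors the pre-test one, gated on the main tests
	// actually having run.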
	if o.postTestCmd != "" && (o.test || (kubemarkUpErr == nil && o.testCmd != "")) {
		errs = util.AppendError(errs, control.XMLWrap(&suite, "post-test command", func() error {
			cmdLineTokenized := strings.Fields(os.ExpandEnv(o.postTestCmd))
			return control.FinishRunning(exec.Command(cmdLineTokenized[0], cmdLineTokenized[1:]...))
		}))
	}

	if len(errs) != 0 {
		return fmt.Errorf("encountered %d errors: %v", len(errs), errs)
	}
	return nil
}
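
For reference, the deployer surface that run depends on can be read off the calls above. A minimal sketch, with method names taken from the code but signatures inferred from usage here (kubetest's own interface definitions may differ in detail):

// Sketch of the deployer contract as run() uses it; inferred from the calls
// above, not copied from kubetest's real interface definitions.
package main

import "os/exec"

type deployer interface {
	Up() error                                       // bring the cluster up
	IsUp() error                                     // verify the cluster is reachable
	Down() error                                     // tear the cluster down
	TestSetup() error                                // prepare the cluster for tests
	DumpClusterLogs(localPath, gcsPath string) error // gather logs locally and/or to GCS
	KubectlCommand() (*exec.Cmd, error)              // kubectl invocation; nil selects the default
}

// Optional capability detected via a type assertion near the end of run():
// deployers that also implement publisher get to publish after a green run.
type publisher interface {
	Publish() error
}

run also type-asserts for e2e.TestBuilder when choosing a tester, so a deployer can supply its own tester in place of the default GinkgoScriptTester.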