// runStart — excerpt from cmd/minikube/cmd/start.go (original lines 137–276).


// runStart implements `minikube start`: it initializes event logging and
// tracing, loads (or validates a new) cluster profile, selects a driver —
// falling back through the remaining default drivers when the user did not
// explicitly request one — provisions the machine, boots the node, and
// finally prints kubectl guidance.
func runStart(cmd *cobra.Command, args []string) {
	register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
	ctx := context.Background()
	out.SetJSON(outputFormat == "json")
	if err := pkgtrace.Initialize(viper.GetString(trace)); err != nil {
		exit.Message(reason.Usage, "error initializing tracing: {{.Error}}", out.V{"Error": err.Error()})
	}
	defer pkgtrace.Cleanup()
	displayVersion(version.GetVersion())
	// Best-effort background cleanup of stale preload tarballs; errors ignored.
	go download.CleanUpOlderPreloads()

	// No need to do the update check if no one is going to see it: the notice
	// is only useful on an interactive, non-dry-run invocation.
	// NOTE: this was `!interactive || !dryRun`, a De Morgan inversion that
	// still ran the check in non-interactive mode and skipped it on
	// interactive dry runs — the opposite of the stated intent.
	if viper.GetBool(interactive) && !viper.GetBool(dryRun) {
		// Avoid blocking execution on optional HTTP fetches
		go notify.MaybePrintUpdateTextFromGithub()
	}

	displayEnviron(os.Environ())
	if viper.GetBool(force) {
		out.WarningT("minikube skips various validations when --force is supplied; this may lead to unexpected behavior")
	}

	// if --registry-mirror specified when run minikube start,
	// take arg precedence over MINIKUBE_REGISTRY_MIRROR
	// actually this is a hack, because viper 1.0.0 can assign env to variable if StringSliceVar
	// and i can't update it to 1.4.0, it affects too much code
	// other types (like String, Bool) of flag works, so imageRepository, imageMirrorCountry
	// can be configured as MINIKUBE_IMAGE_REPOSITORY and IMAGE_MIRROR_COUNTRY
	// this should be updated to documentation
	if len(registryMirror) == 0 {
		registryMirror = viper.GetStringSlice("registry-mirror")
	}

	// Reject invalid profile names before doing any disk or network work.
	if !config.ProfileNameValid(ClusterFlagValue()) {
		out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
		exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 2 characters, starting with alphanumeric.")
	}
	existing, err := config.Load(ClusterFlagValue())
	if err != nil && !config.IsNotExist(err) {
		// A missing config just means a fresh cluster; any other load
		// failure is fatal, with a more specific reason for EPERM.
		kind := reason.HostConfigLoad
		if config.IsPermissionDenied(err) {
			kind = reason.HostHomePermission
		}
		exit.Message(kind, "Unable to load config: {{.error}}", out.V{"error": err})
	}

	if existing != nil {
		upgradeExistingConfig(cmd, existing)
	} else {
		validateProfileName()
	}

	validateSpecifiedDriver(existing)
	validateKubernetesVersion(existing)

	ds, alts, specified := selectDriver(existing)
	// --base-image only applies to container-based (KIC) drivers; VM drivers
	// take an ISO instead.
	if cmd.Flag(kicBaseImage).Changed {
		if !isBaseImageApplicable(ds.Name) {
			exit.Message(reason.Usage,
				"flag --{{.imgFlag}} is not available for driver '{{.driver}}'. Did you mean to use '{{.docker}}' or '{{.podman}}' driver instead?\n"+
					"Please use --{{.isoFlag}} flag to configure VM based drivers",
				out.V{
					"imgFlag": kicBaseImage,
					"driver":  ds.Name,
					"docker":  registry.Docker,
					"podman":  registry.Podman,
					"isoFlag": isoURL,
				},
			)
		}
	}

	starter, err := provisionWithDriver(cmd, ds, existing)
	if err != nil {
		node.ExitIfFatal(err)
		machine.MaybeDisplayAdvice(err, ds.Name)
		if specified {
			// If the user specified a driver, don't fallback to anything else
			exitGuestProvision(err)
		} else {
			success := false
			// Walk down the rest of the options
			for _, alt := range alts {
				// Skip non-default drivers
				if !alt.Default {
					continue
				}
				out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
				ds = alt
				// Delete the existing cluster and try again with the next driver on the list.
				// Use distinct local error names so the outer `err` is not
				// shadowed: previously `profile, err :=` meant the warning
				// above and the final exitGuestProvision below always
				// reported the stale original error, never the most recent
				// retry failure.
				profile, loadErr := config.LoadProfile(ClusterFlagValue())
				if loadErr != nil {
					klog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
				}

				if delErr := deleteProfile(ctx, profile); delErr != nil {
					out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
				}
				starter, err = provisionWithDriver(cmd, ds, existing)
				if err == nil {
					// Success!
					success = true
					break
				}
			}
			if !success {
				exitGuestProvision(err)
			}
		}
	}

	// KIC drivers cannot change container mounts after the container exists;
	// refuse a --mount-string that differs from the one used at creation.
	if existing != nil && driver.IsKIC(existing.Driver) {
		if viper.GetBool(createMount) {
			old := ""
			if len(existing.ContainerVolumeMounts) > 0 {
				old = existing.ContainerVolumeMounts[0]
			}
			if mount := viper.GetString(mountString); old != mount {
				// (message fix: closing quote was transposed with the paren —
				// `'{{.new}})'` — producing mismatched quoting in the output)
				exit.Message(reason.GuestMountConflict, "Sorry, {{.driver}} does not allow mounts to be changed after container creation (previous mount: '{{.old}}', new mount: '{{.new}}')", out.V{
					"driver": existing.Driver,
					"new":    mount,
					"old":    old,
				})
			}
		}
	}

	kubeconfig, err := startWithDriver(cmd, starter, existing)
	if err != nil {
		node.ExitIfFatal(err)
		exit.Error(reason.GuestStart, "failed to start node", err)
	}

	// Non-fatal: the cluster is already up even if kubectl guidance fails.
	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
		klog.Errorf("kubectl info: %v", err)
	}
}