in pkg/minikube/node/start.go [92:218]
// Start brings up Kubernetes (or skips it entirely) on the node described by
// starter. When apiServer is true it bootstraps/updates a control-plane node
// and returns the resulting kubeconfig settings; otherwise it sets up a
// worker/secondary node, joins it to the existing control plane, and returns
// nil settings. Background work (mounts, cached-image loading, addons) is
// tracked on a local WaitGroup and completed before returning.
func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
	// wg tracks the background setup goroutines spawned below; Start blocks
	// on it just before writing the final profile config.
	var wg sync.WaitGroup
	// handleNoKubernetes reports whether the user asked to run without
	// Kubernetes (stopk8s) — presumably the --no-kubernetes flow; verify.
	stopk8s, err := handleNoKubernetes(starter)
	if err != nil {
		return nil, err
	}
	if stopk8s {
		// No Kubernetes: still configure the container runtime (with a zero
		// semver, since no k8s version applies) and host mounts, then persist
		// the profile and return early with no kubeconfig settings.
		nv := semver.Version{Major: 0, Minor: 0, Patch: 0}
		configureRuntimes(starter.Runner, *starter.Cfg, nv)
		configureMounts(&wg, *starter.Cfg)
		return nil, config.Write(viper.GetString(config.ProfileName), starter.Cfg)
	}
	// wait for preloaded tarball to finish downloading before configuring runtimes
	waitCacheRequiredImages(&cacheGroup)
	sv, err := util.ParseKubernetesVersion(starter.Node.KubernetesVersion)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to parse Kubernetes version")
	}
	// configure the runtime (docker, containerd, crio)
	cr := configureRuntimes(starter.Runner, *starter.Cfg, sv)
	// check if installed runtime is compatible with current minikube code
	if err = cruntime.CheckCompatibility(cr); err != nil {
		return nil, err
	}
	showVersionInfo(starter.Node.KubernetesVersion, cr)
	// Add "host.minikube.internal" DNS alias (intentionally non-fatal):
	// failure to resolve or install the alias only logs an error.
	hostIP, err := cluster.HostIP(starter.Host, starter.Cfg.Name)
	if err != nil {
		klog.Errorf("Unable to get host IP: %v", err)
	} else if err := machine.AddHostAlias(starter.Runner, constants.HostAlias, hostIP); err != nil {
		klog.Errorf("Unable to add host alias: %v", err)
	}
	var kcs *kubeconfig.Settings
	var bs bootstrapper.Bootstrapper
	if apiServer {
		// Control-plane path: handleAPIServer produces both the kubeconfig
		// settings returned to the caller and the node's bootstrapper.
		kcs, bs, err = handleAPIServer(starter, cr, hostIP)
		if err != nil {
			return nil, err
		}
	} else {
		// Non-API-server path: build a bootstrapper for this node, install
		// certs, and push the node's component configuration. The actual
		// cluster join happens further down, after mounts/addons are kicked off.
		bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner)
		if err != nil {
			return nil, errors.Wrap(err, "Failed to get bootstrapper")
		}
		if err = bs.SetupCerts(*starter.Cfg, *starter.Node); err != nil {
			return nil, errors.Wrap(err, "setting up certs")
		}
		if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil {
			return nil, errors.Wrap(err, "update node")
		}
	}
	// NOTE(review): this goroutine is launched before any visible wg.Add;
	// configureMounts presumably does its own Add/Done on wg (the synchronous
	// call in the stopk8s branch above suggests so) — confirm, otherwise
	// wg.Wait() below could return before mounts finish.
	go configureMounts(&wg, *starter.Cfg)
	// This Add pairs with the cached-image goroutine immediately below.
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Best-effort: failures to load the profile or push cached images are
		// reported to the user but do not fail Start.
		profile, err := config.LoadProfile(starter.Cfg.Name)
		if err != nil {
			out.FailureT("Unable to load profile: {{.error}}", out.V{"error": err})
		}
		if err := CacheAndLoadImagesInConfig([]*config.Profile{profile}); err != nil {
			out.FailureT("Unable to push cached images: {{.error}}", out.V{"error": err})
		}
	}()
	// enable addons, both old and new!
	addonList := viper.GetStringSlice(config.AddonListFlag)
	if starter.ExistingAddons != nil {
		if viper.GetBool("force") {
			// Global package-level toggle read by the addons package.
			addons.Force = true
		}
		// addons.Start is expected to call wg.Done() for this Add — it runs
		// concurrently and is awaited by wg.Wait() below.
		wg.Add(1)
		go addons.Start(&wg, starter.Cfg, starter.ExistingAddons, addonList)
	}
	// discourage use of the virtualbox driver
	if starter.Cfg.Driver == driver.VirtualBox && viper.GetBool(config.WantVirtualBoxDriverWarning) {
		warnVirtualBox()
	}
	if apiServer {
		// special ops for none , like change minikube directory.
		// multinode super doesn't work on the none driver
		if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 {
			prepareNone()
		}
	} else {
		// Make sure to use the command runner for the control plane to generate the join token
		cpBs, cpr, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
		if err != nil {
			return nil, errors.Wrap(err, "getting control plane bootstrapper")
		}
		if err := joinCluster(starter, cpBs, bs); err != nil {
			return nil, errors.Wrap(err, "joining cp")
		}
		// Re-apply CNI via the control plane's runner so networking picks up
		// the newly joined node.
		cnm, err := cni.New(starter.Cfg)
		if err != nil {
			return nil, errors.Wrap(err, "cni")
		}
		if err := cnm.Apply(cpr); err != nil {
			return nil, errors.Wrap(err, "cni apply")
		}
	}
	// Block until the node reports ready (components per the user's --wait
	// settings), bounded by the configured wait timeout.
	klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node)
	if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
		return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout))
	}
	klog.Infof("waiting for startup goroutines ...")
	wg.Wait()
	// Write enabled addons to the config before completion
	return kcs, config.Write(viper.GetString(config.ProfileName), starter.Cfg)
}