func nodeStatus()

in cmd/minikube/cmd/status.go [314:429]


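// nodeStatus looks up the status of a given node: its host machine, kubelet, apiserver, and kubeconfig entry.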
func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) {
	controlPlane := n.ControlPlane
	name := config.MachineName(cc, n)

	st := &Status{
		Name:       name,
		Host:       Nonexistent,
		APIServer:  Nonexistent,
		Kubelet:    Nonexistent,
		Kubeconfig: Nonexistent,
		Worker:     !controlPlane,
	}

	hs, err := machine.Status(api, name)
	klog.Infof("%s host status = %q (err=%v)", name, hs, err)
	if err != nil {
		return st, errors.Wrap(err, "host")
	}

	// We have no record of this host. Return nonexistent struct
	if hs == state.None.String() {
		return st, nil
	}
	st.Host = hs

	// If it's not running, quickly bail out rather than delivering conflicting messages
	if st.Host != state.Running.String() {
		klog.Infof("host is not running, skipping remaining checks")
		st.APIServer = st.Host
		st.Kubelet = st.Host
		st.Kubeconfig = st.Host
		return st, nil
	}

	// We have a fully operational host; now we can check for details
	if _, err := cluster.DriverIP(api, name); err != nil {
		klog.Errorf("failed to get driver ip: %v", err)
		st.Host = state.Error.String()
		return st, err
	}

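	// Default to Configured; kubeconfig and apiserver checks only apply to control-plane nodes.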
	st.Kubeconfig = Configured
	if !controlPlane {
		st.Kubeconfig = Irrelevant
		st.APIServer = Irrelevant
	}

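	// Load the machine and obtain a command runner so the node can be inspected directly.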
	host, err := machine.LoadHost(api, name)
	if err != nil {
		return st, err
	}

	cr, err := machine.CommandRunner(host)
	if err != nil {
		return st, err
	}

	// Check storage
	p, err := machine.DiskUsed(cr, "/var")
	if err != nil {
		klog.Errorf("failed to get storage capacity of /var: %v", err)
		st.Host = state.Error.String()
		return st, err
	}
	if p >= 99 {
		st.Host = codeNames[InsufficientStorage]
	}

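	// Check the kubelet service status on the node.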
	stk := kverify.ServiceStatus(cr, "kubelet")
	st.Kubelet = stk.String()
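	// If a scheduled stop is pending, report how long until it fires.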
	if cc.ScheduledStop != nil {
		initiationTime := time.Unix(cc.ScheduledStop.InitiationTime, 0)
		st.TimeToStop = time.Until(initiationTime.Add(cc.ScheduledStop.Duration)).String()
	}
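	// Report whether a docker-env or podman-env shell is currently active.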
	if os.Getenv(constants.MinikubeActiveDockerdEnv) != "" {
		st.DockerEnv = "in-use"
	}
	if os.Getenv(constants.MinikubeActivePodmanEnv) != "" {
		st.PodManEnv = "in-use"
	}
	// Early exit for worker nodes
	if !controlPlane {
		return st, nil
	}

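	// Resolve the endpoint used to reach the apiserver (the auto-pause proxy endpoint when that addon is enabled).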
	var hostname string
	var port int
	if cc.Addons["auto-pause"] {
		hostname, _, port, err = driver.AutoPauseProxyEndpoint(&cc, &n, host.DriverName)
	} else {
		hostname, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, host.DriverName)
	}

	if err != nil {
		klog.Errorf("forwarded endpoint: %v", err)
		st.Kubeconfig = Misconfigured
	} else {
		err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port)
		if err != nil && st.Host != state.Starting.String() {
			klog.Errorf("kubeconfig endpoint: %v", err)
			st.Kubeconfig = Misconfigured
		}
	}

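	// Finally, determine the state of the apiserver itself.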
	sta, err := kverify.APIServerStatus(cr, hostname, port)
	klog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)

	if err != nil {
		klog.Errorln("Error apiserver status:", err)
		st.APIServer = state.Error.String()
	} else {
		st.APIServer = sta.String()
	}

	return st, nil
}