in cmd/nodeadm/upgrade/upgrade.go [70:191]
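// Run executes the nodeadm upgrade flow: it verifies root privileges and a
// prior installation, runs the preflight checks, resolves artifacts for the
// requested Kubernetes version, and then performs the upgrade.
//
// Example invocation (a sketch; --config-source and --skip are taken from the
// messages below, other flag names depend on how the command is registered):
//
//     nodeadm upgrade --config-source file://nodeConfig.yaml --skip pod-validation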
func (c *command) Run(log *zap.Logger, opts *cli.GlobalOptions) error {
    ctx := context.Background()
    ctx = logger.NewContext(ctx, log)
    root, err := cli.IsRunningAsRoot()
    if err != nil {
        return err
    }
    if !root {
        return cli.ErrMustRunAsRoot
    }
    if c.configSource == "" {
        flaggy.ShowHelpAndExit("--config-source is a required flag. The format is a URI with supported schemes: [file, imds]." +
            " For example on hybrid nodes --config-source file://nodeConfig.yaml")
    }
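    // Determine which artifacts a previous nodeadm install recorded; without
    // that tracker data there is nothing to upgrade.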
log.Info("Loading installed components")
installed, err := tracker.GetInstalledArtifacts()
if err != nil && os.IsNotExist(err) {
log.Info("No nodeadm components installed. Please use nodeadm install and nodeadm init commands to bootstrap a node")
return nil
} else if err != nil {
return err
}
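    // Bound the preflight checks and the upgrade itself by the configured timeout.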
    ctx, cancel := context.WithTimeout(ctx, c.timeout)
    defer cancel()
    if !slices.Contains(c.skipPhases, initNodePreflightCheck) {
        log.Info("Validating if node has initialized")
        if err := node.IsInitialized(ctx); err != nil {
            return fmt.Errorf("node not initialized. Please use nodeadm init command to bootstrap a node. err: %v", err)
        }
    }
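    // Load the node configuration from the requested source, fill in defaults,
    // and validate it before making any changes to the node.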
log.Info("Loading configuration..", zap.String("configSource", c.configSource))
nodeProvider, err := node.NewNodeProvider(c.configSource, c.skipPhases, log)
if err != nil {
return err
}
nodeProvider.PopulateNodeConfigDefaults()
if err := nodeProvider.ValidateConfig(); err != nil {
return err
}
credsProvider, err := creds.GetCredentialProviderFromNodeConfig(nodeProvider.GetNodeConfig())
if err != nil {
return err
}
    // Validate the credential provider. Upgrade does not allow changing credential providers.
    installedCredsProvider, err := creds.GetCredentialProviderFromInstalledArtifacts(installed.Artifacts)
    if err != nil {
        return err
    }
    if installedCredsProvider != credsProvider {
        return fmt.Errorf("upgrade does not support changing credential providers. Please uninstall and install with new credential provider")
    }
log.Info("Validating Kubernetes version", zap.Reflect("kubernetes version", c.kubernetesVersion))
// Create a Source for all AWS managed artifacts.
awsSource, err := aws.GetLatestSource(ctx, c.kubernetesVersion)
if err != nil {
return err
}
log.Info("Using Kubernetes version", zap.Reflect("kubernetes version", awsSource.Eks.Version))
log.Info("Creating daemon manager..")
daemonManager, err := daemon.NewDaemonManager()
if err != nil {
return err
}
defer daemonManager.Close()
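    // If kubelet is installed and currently running, require the node to be
    // drained and cordoned before upgrading in place, unless the corresponding
    // preflight checks are explicitly skipped.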
    if installed.Artifacts.Kubelet {
        kubeletStatus, err := daemonManager.GetDaemonStatus(kubelet.KubeletDaemonName)
        if err != nil {
            return err
        }
        if kubeletStatus == daemon.DaemonStatusRunning {
            if !slices.Contains(c.skipPhases, skipPodPreflightCheck) {
                log.Info("Validating if node has been drained...")
                if drained, err := node.IsDrained(ctx); err != nil {
                    return fmt.Errorf("validating if node has been drained: %w", err)
                } else if !drained {
                    return fmt.Errorf("only static pods and pods controlled by daemon-sets can be running on the node. Please move pods " +
                        "to different node or use --skip pod-validation")
                }
            }
            if !slices.Contains(c.skipPhases, skipNodePreflightCheck) {
                log.Info("Validating if node has been marked unschedulable...")
                if err := node.IsUnscheduled(ctx); err != nil {
                    return fmt.Errorf("please drain or cordon node to mark it unschedulable or use --skip node-validation: %w", err)
                }
            }
        }
    }
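    // Configure the package manager with the same containerd source that was
    // recorded at install time so the upgrade stays on that distribution channel.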
log.Info("Creating package manager...")
containerdSource := containerd.GetContainerdSource(installed.Artifacts.Containerd)
log.Info("Configuring package manager with", zap.Reflect("containerd source", string(containerdSource)))
packageManager, err := packagemanager.New(containerdSource, log)
if err != nil {
return err
}
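    // Assemble the upgrade flow from everything resolved above and run it.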
    upgrader := &flows.Upgrader{
        NodeProvider:       nodeProvider,
        AwsSource:          awsSource,
        PackageManager:     packageManager,
        CredentialProvider: credsProvider,
        Artifacts:          installed.Artifacts,
        DaemonManager:      daemonManager,
        SkipPhases:         c.skipPhases,
        Logger:             log,
    }
    return upgrader.Run(ctx)
}