in pkg/providers/tinkerbell/upgrade.go [509:632]
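// PreCoreComponentsUpgrade upgrades the Tinkerbell stack in the management cluster ahead of the
// core components upgrade. It migrates the legacy single chart to the separate CRDs and stack
// charts and converts pre-release Rufio resources when they are present.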
func (p *Provider) PreCoreComponentsUpgrade(
ctx context.Context,
cluster *types.Cluster,
managementComponents *cluster.ManagementComponents,
clusterSpec *cluster.Spec,
) error {
// When upgrading a workload cluster, the cluster object can be nil. Noop if it is.
if cluster == nil {
return nil
}
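// The spec is required below to derive the stack upgrade options, so fail fast if it is missing.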
if clusterSpec == nil {
return errors.New("cluster spec is nil")
}
// PreCoreComponentsUpgrade can be called for workload clusters. Ensure we only attempt to
// upgrade the stack if we're upgrading a management cluster.
if clusterSpec.Cluster.IsManaged() {
return nil
}
// Attempt the upgrade. This should upgrade the stack in the management cluster by updating
// images, installing new CRDs and possibly removing old ones.
// Check if the cluster has the legacy chart installed.
hasLegacy, err := p.stackInstaller.HasLegacyChart(ctx, managementComponents.Tinkerbell, cluster.KubeconfigFile)
if err != nil {
return fmt.Errorf("getting legacy chart: %v", err)
}
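// If the legacy single-chart install is present, migrate it: mark the CRDs to be kept, remove
// the legacy release, then annotate the CRDs so the new charts can adopt them.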
if hasLegacy {
// Upgrade the legacy chart to add the keep resource policy to the CRDs so they survive the uninstall below.
err = p.stackInstaller.UpgradeLegacy(
ctx,
managementComponents.Tinkerbell,
cluster.KubeconfigFile,
stack.WithLoadBalancerEnabled(
len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations) != 0 && // when there are no worker node groups, kube-vip on the control plane nodes handles load balancing
!p.datacenterConfig.Spec.SkipLoadBalancerDeployment), // honor datacenterConfig.Spec.SkipLoadBalancerDeployment
)
if err != nil {
return fmt.Errorf("upgrading legacy chart: %v", err)
}
// Uninstall legacy chart
err = p.stackInstaller.Uninstall(
ctx,
managementComponents.Tinkerbell,
cluster.KubeconfigFile,
)
if err != nil {
return fmt.Errorf("uninstalling legacy chart: %v", err)
}
// Annotate the existing CRDs so the new CRDs chart can adopt them.
err = p.annotateCRDs(ctx, cluster)
if err != nil {
return fmt.Errorf("annotating crds: %v", err)
}
}
// Install or upgrade the CRDs chart.
err = p.stackInstaller.UpgradeInstallCRDs(
ctx,
managementComponents.Tinkerbell,
cluster.KubeconfigFile,
)
if err != nil {
return fmt.Errorf("upgrading crds chart: %v", err)
}
// Install or upgrade the Tinkerbell stack chart.
err = p.stackInstaller.Upgrade(
ctx,
managementComponents.Tinkerbell,
p.datacenterConfig.Spec.TinkerbellIP,
cluster.KubeconfigFile,
p.datacenterConfig.Spec.HookImagesURLPath,
stack.WithLoadBalancerInterface(p.datacenterConfig.Spec.LoadBalancerInterface),
stack.WithBootsOnKubernetes(),
stack.WithStackServiceEnabled(true),
stack.WithDHCPRelayEnabled(true),
stack.WithLoadBalancerEnabled(
len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations) != 0 && // when there are no worker node groups, kube-vip on the control plane nodes handles load balancing
!p.datacenterConfig.Spec.SkipLoadBalancerDeployment), // honor datacenterConfig.Spec.SkipLoadBalancerDeployment
stack.WithHookIsoOverride(p.datacenterConfig.Spec.HookIsoURL),
)
if err != nil {
return fmt.Errorf("upgrading stack: %v", err)
}
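// Probe for the pre-release Rufio BaseboardManagement CRD; see the migration rationale below.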
hasBaseboardManagement, err := p.providerKubectlClient.HasCRD(
ctx,
rufiounreleased.BaseboardManagementResourceName,
cluster.KubeconfigFile,
)
if err != nil {
return fmt.Errorf("upgrading rufio crds: %v", err)
}
// We introduced the Rufio dependency prior to its initial release. Between its introduction
// and its official release, breaking changes occurred to the CRDs. We're using the presence
// of the obsolete BaseboardManagement CRD to determine if there's an old Rufio installed.
// If there is, we need to convert all obsolete BaseboardManagement CRs to Machine CRs (the
// CRD that supersedes BaseboardManagement).
if hasBaseboardManagement {
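// handleRufioUnreleasedCRDs is expected to convert the obsolete BaseboardManagement custom
// resources to Machine custom resources before the old CRD is deleted below.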
if err := p.handleRufioUnreleasedCRDs(ctx, cluster); err != nil {
return fmt.Errorf("upgrading rufio crds: %v", err)
}
// Remove the unreleased Rufio CRDs from the cluster; this will also remove any residual
// resources.
err = p.providerKubectlClient.DeleteCRD(
ctx,
rufiounreleased.BaseboardManagementResourceName,
cluster.KubeconfigFile,
)
if err != nil {
return fmt.Errorf("could not delete machines crd: %v", err)
}
}
return nil
}