in cmd/eksctl-anywhere/cmd/createcluster.go [103:290]
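// createCluster validates the CLI inputs and the cluster configuration, assembles
// the dependencies for the run, and executes the create workflow that matches the
// requested cluster type.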
func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) error {
	if cc.forceClean {
		logger.MarkFail(forceCleanupDeprecationMessageForCreateDelete)
		return errors.New("please remove the --force-cleanup flag")
	}

	ctx := cmd.Context()
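	// Load and validate the cluster configuration file before doing anything expensive.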
	clusterConfigFileExist := validations.FileExists(cc.fileName)
	if !clusterConfigFileExist {
		return fmt.Errorf("the cluster config file %s does not exist", cc.fileName)
	}

	clusterConfig, err := v1alpha1.GetAndValidateClusterConfig(cc.fileName)
	if err != nil {
		return fmt.Errorf("the cluster config file provided is invalid: %v", err)
	}

	if clusterConfig.Spec.DatacenterRef.Kind == v1alpha1.TinkerbellDatacenterKind {
		if err := checkTinkerbellFlags(cmd.Flags(), cc.hardwareCSVPath, Create); err != nil {
			return err
		}
	}

	if clusterConfig.Spec.EtcdEncryption != nil {
		return errors.New("etcdEncryption is not supported during cluster creation")
	}
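	// Verify the local Docker installation meets the minimum version and check its allocated memory.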
	docker := executables.BuildDockerExecutable()

	if err := validations.CheckMinimumDockerVersion(ctx, docker); err != nil {
		return fmt.Errorf("failed to validate docker: %v", err)
	}

	validations.CheckDockerAllocatedMemory(ctx, docker)
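	// Refuse to proceed if a non-empty kubeconfig already exists for this cluster name.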
	kubeconfigPath := kubeconfig.FromClusterName(clusterConfig.Name)
	if validations.FileExistsAndIsNotEmpty(kubeconfigPath) {
		return fmt.Errorf(
			"old cluster config file exists under %s, please use a different clusterName to proceed",
			clusterConfig.Name,
		)
	}
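	// Build the full cluster spec and the CLI-level configuration derived from it.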
	clusterSpec, err := newClusterSpec(cc.clusterOptions)
	if err != nil {
		return err
	}

	if err := validations.ValidateAuthenticationForRegistryMirror(clusterSpec); err != nil {
		return err
	}

	cliConfig := buildCliConfig(clusterSpec)

	dirs, err := cc.directoriesToMount(clusterSpec, cliConfig, cc.installPackages)
	if err != nil {
		return err
	}

	createCLIConfig, err := buildCreateCliConfig(cc)
	if err != nil {
		return err
	}

	clusterManagerTimeoutOpts, err := buildClusterManagerOpts(cc.timeoutOptions, clusterSpec.Cluster.Spec.DatacenterRef.Kind)
	if err != nil {
		return fmt.Errorf("failed to build cluster manager opts: %v", err)
	}
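	// Resolve any preflight validations the user asked to skip.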
	var skippedValidations map[string]bool
	if len(cc.skipValidations) != 0 {
		skippedValidations, err = validations.ValidateSkippableValidation(cc.skipValidations, createvalidations.SkippableValidations)
		if err != nil {
			return err
		}
	}
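	// Assemble the dependencies used by the create workflows.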
	factory := dependencies.ForSpec(clusterSpec).WithExecutableMountDirs(dirs...).
		WithBootstrapper().
		WithCliConfig(cliConfig).
		WithClusterManager(clusterSpec.Cluster, clusterManagerTimeoutOpts).
		WithProvider(cc.fileName, clusterSpec.Cluster, cc.skipIpCheck, cc.hardwareCSVPath, cc.forceClean, cc.tinkerbellBootstrapIP, skippedValidations, cc.providerOptions).
		WithGitOpsFlux(clusterSpec.Cluster, clusterSpec.FluxConfig, cliConfig).
		WithWriter().
		WithEksdInstaller().
		WithPackageManager(clusterSpec, cc.installPackages, cc.managementKubeconfig).
		WithValidatorClients().
		WithCreateClusterDefaulter(createCLIConfig).
		WithClusterApplier().
		WithKubeconfigWriter(clusterSpec.Cluster).
		WithClusterCreator(clusterSpec.Cluster).
		WithClusterMover().
		WithAwsIamAuth(clusterSpec.Cluster)

	if cc.timeoutOptions.noTimeouts {
		factory.WithNoTimeouts()
	}

	deps, err := factory.Build(ctx)
	if err != nil {
		return err
	}
	defer close(ctx, deps)
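	// Apply create-time defaults to the cluster spec.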
	clusterSpec, err = deps.CreateClusterDefaulter.Run(ctx, clusterSpec)
	if err != nil {
		return err
	}
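	// Set up the preflight validations for the cluster create.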
	mgmt := getManagementCluster(clusterSpec)

	validationOpts := &validations.Opts{
		Kubectl: deps.UnAuthKubectlClient,
		Spec:    clusterSpec,
		WorkloadCluster: &types.Cluster{
			Name:           clusterSpec.Cluster.Name,
			KubeconfigFile: kubeconfig.FromClusterName(clusterSpec.Cluster.Name),
		},
		ManagementCluster:  mgmt,
		Provider:           deps.Provider,
		CliConfig:          cliConfig,
		SkippedValidations: skippedValidations,
		KubeClient:         deps.UnAuthKubeClient.KubeconfigClient(mgmt.KubeconfigFile),
		ManifestReader:     deps.ManifestReader,
		BundlesOverride:    cc.bundlesOverride,
	}
	createValidations := createvalidations.New(validationOpts)
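	// Run the create workflow that matches the request: the experimental new workflow,
	// a workload (managed) cluster, or a self-managed management cluster.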
	if features.UseNewWorkflows().IsActive() {
		deps, err = factory.Build(ctx)
		if err != nil {
			return err
		}

		wflw := &newManagement.CreateCluster{
			Spec:                          clusterSpec,
			Bootstrapper:                  deps.Bootstrapper,
			CreateBootstrapClusterOptions: deps.Provider,
			Cluster:                       clustermanager.NewCreateClusterShim(clusterSpec, deps.Provider),
			FS:                            deps.Writer,
		}

		wflw.WithHookRegistrar(awsiamauth.NewHookRegistrar(deps.AwsIamAuth, clusterSpec))

		// Not all provider implementations want to bind hooks, so we explicitly check
		// whether the provider is a hook registrar before registering it.
		if registrar, ok := deps.Provider.(newManagement.CreateClusterHookRegistrar); ok {
			wflw.WithHookRegistrar(registrar)
		}

		err = wflw.Run(ctx)
	} else if clusterConfig.IsManaged() {
		createWorkloadCluster := workload.NewCreate(
			deps.Provider,
			deps.ClusterManager,
			deps.GitOpsFlux,
			deps.Writer,
			deps.EksdInstaller,
			deps.PackageManager,
			deps.ClusterCreator,
			deps.UnAuthKubectlClient,
			deps.AwsIamAuth,
		)
		err = createWorkloadCluster.Run(ctx, clusterSpec, createValidations)
	} else if clusterSpec.Cluster.IsSelfManaged() {
		logger.V(1).Info("Using the eksa controller to create the management cluster")
		createMgmtCluster := management.NewCreate(
			deps.Bootstrapper,
			deps.UnAuthKubeClient,
			deps.Provider,
			deps.ClusterManager,
			deps.GitOpsFlux,
			deps.Writer,
			deps.EksdInstaller,
			deps.PackageManager,
			deps.ClusterCreator,
			deps.EksaInstaller,
			deps.ClusterMover,
			deps.AwsIamAuth,
		)
		err = createMgmtCluster.Run(ctx, clusterSpec, createValidations)
	}
	cleanup(deps, &err)
	return err
}