func CreateNodepool()

in virtualcluster/nodes_create.go [38:93]


// CreateNodepool deploys a new virtual-node pool named nodepoolName into the
// cluster reached through kubeCfgPath. Optional NodepoolOpt values tune the
// pool configuration before validation. It fails if a Helm release with the
// same derived name already exists, and it best-effort tears down the pool
// controller if any later step errors out.
func CreateNodepool(ctx context.Context, kubeCfgPath string, nodepoolName string, opts ...NodepoolOpt) (retErr error) {
	// Start from the package defaults and layer caller-supplied options on top.
	cfg := defaultNodepoolCfg
	for _, apply := range opts {
		apply(&cfg)
	}
	cfg.name = nodepoolName

	if err := cfg.validate(); err != nil {
		return err
	}

	getCli, err := helmcli.NewGetCli(kubeCfgPath, virtualnodeReleaseNamespace)
	if err != nil {
		return fmt.Errorf("failed to create helm get client: %w", err)
	}

	// Duplicate guard: a successful Get means the release is already present.
	// NOTE(review): any Get error — not just "release not found" — is treated
	// as absence here; a transient lookup failure would bypass this check.
	// Distinguishing the not-found sentinel would need the Helm driver package.
	if _, getErr := getCli.Get(cfg.nodeHelmReleaseName()); getErr == nil {
		return fmt.Errorf("nodepool %s already exists", cfg.nodeHelmReleaseName())
	}

	cleanup, err := createNodepoolController(ctx, kubeCfgPath, &cfg)
	if err != nil {
		return err
	}
	defer func() {
		// NOTE: Try best to cleanup. If there is leaky resources after
		// force stop, like kill process, it needs cleanup manually.
		if retErr != nil {
			_ = cleanup()
		}
	}()

	chart, err := manifests.LoadChart(virtualnodeChartName)
	if err != nil {
		return fmt.Errorf("failed to load virtual node chart: %w", err)
	}

	appliers, err := cfg.toNodeHelmValuesAppliers()
	if err != nil {
		return err
	}

	release, err := helmcli.NewReleaseCli(
		kubeCfgPath,
		virtualnodeReleaseNamespace,
		cfg.nodeHelmReleaseName(),
		chart,
		virtualnodeReleaseLabels,
		appliers...,
	)
	if err != nil {
		return fmt.Errorf("failed to create helm release client: %w", err)
	}

	// Install the chart, allowing a generous rollout window.
	return release.Deploy(ctx, 30*time.Minute)
}