func main()

in cmd/glbc/main.go [63:215]

Entry point for the GLBC ingress controller: it parses flags, builds the Kubernetes and CRD clients, ensures the CRDs the controller consumes, assembles the shared controller context, and then runs the controllers, either directly or under leader election.

func main() {
	flags.Register()
	rand.Seed(time.Now().UTC().UnixNano())
	flag.Parse()

	if flags.F.Version {
		fmt.Printf("Controller version: %s\n", version.Version)
		os.Exit(0)
	}

	klog.V(0).Infof("Starting GLBC image: %q, cluster name %q", version.Version, flags.F.ClusterName)
	klog.V(0).Infof("Latest commit hash: %q", version.GitCommit)
	for i, a := range os.Args {
		klog.V(0).Infof("argv[%d]: %q", i, a)
	}

	klog.V(2).Infof("Flags = %+v", flags.F)
	defer klog.Flush()
	// Create a kube-config that uses protobuf to communicate with the API server;
	// protobuf is cheaper than JSON for the controller's high-volume watch traffic.
	kubeConfigForProtobuf, err := app.NewKubeConfigForProtobuf()
	if err != nil {
		klog.Fatalf("Failed to create kubernetes client config for protobuf: %v", err)
	}

	kubeClient, err := kubernetes.NewForConfig(kubeConfigForProtobuf)
	if err != nil {
		klog.Fatalf("Failed to create kubernetes client: %v", err)
	}

	// Due to scaling issues, leader election must be configured with a separate k8s client.
	leaderElectKubeClient, err := kubernetes.NewForConfig(restclient.AddUserAgent(kubeConfigForProtobuf, "leader-election"))
	if err != nil {
		klog.Fatalf("Failed to create kubernetes client for leader election: %v", err)
	}

	// Create kube-config for CRDs.
	// TODO(smatti): Migrate to protobuf once CRDs support it.
	kubeConfig, err := app.NewKubeConfig()
	if err != nil {
		klog.Fatalf("Failed to create kubernetes client config: %v", err)
	}

	var backendConfigClient backendconfigclient.Interface
	crdClient, err := crdclient.NewForConfig(kubeConfig)
	if err != nil {
		klog.Fatalf("Failed to create kubernetes CRD client: %v", err)
	}
	// TODO(rramkumar): Reuse this CRD handler for other CRDs as they are added.
	crdHandler := crd.NewCRDHandler(crdClient)
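	// Each CRD is ensured (created or updated) before its typed client is built.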
	backendConfigCRDMeta := backendconfig.CRDMeta()
	if _, err := crdHandler.EnsureCRD(backendConfigCRDMeta, true); err != nil {
		klog.Fatalf("Failed to ensure BackendConfig CRD: %v", err)
	}

	backendConfigClient, err = backendconfigclient.NewForConfig(kubeConfig)
	if err != nil {
		klog.Fatalf("Failed to create BackendConfig client: %v", err)
	}

	var frontendConfigClient frontendconfigclient.Interface
	if flags.F.EnableFrontendConfig {
		frontendConfigCRDMeta := frontendconfig.CRDMeta()
		if _, err := crdHandler.EnsureCRD(frontendConfigCRDMeta, true); err != nil {
			klog.Fatalf("Failed to ensure FrontendConfig CRD: %v", err)
		}

		frontendConfigClient, err = frontendconfigclient.NewForConfig(kubeConfig)
		if err != nil {
			klog.Fatalf("Failed to create FrontendConfig client: %v", err)
		}
	}

	// The ServiceNetworkEndpointGroup CRD is ensured unconditionally, unlike the
	// feature-gated CRDs above and below.
	var svcNegClient svcnegclient.Interface
	negCRDMeta := svcneg.CRDMeta()
	if _, err := crdHandler.EnsureCRD(negCRDMeta, true); err != nil {
		klog.Fatalf("Failed to ensure ServiceNetworkEndpointGroup CRD: %v", err)
	}

	svcNegClient, err = svcnegclient.NewForConfig(kubeConfig)
	if err != nil {
		klog.Fatalf("Failed to create NetworkEndpointGroup client: %v", err)
	}

	// The ServiceAttachment CRD backs Private Service Connect (PSC) support.
	var svcAttachmentClient serviceattachmentclient.Interface
	if flags.F.EnablePSC {
		serviceAttachmentCRDMeta := serviceattachment.CRDMeta()
		if _, err := crdHandler.EnsureCRD(serviceAttachmentCRDMeta, true); err != nil {
			klog.Fatalf("Failed to ensure ServiceAttachment CRD: %v", err)
		}

		svcAttachmentClient, err = serviceattachmentclient.NewForConfig(kubeConfig)
		if err != nil {
			klog.Fatalf("Failed to create ServiceAttachment client: %v", err)
		}
	}

	// GCPIngressParams is ensured only when the IngressClass API is available and
	// the Ingress GA fields are enabled.
	ingClassEnabled := flags.F.EnableIngressGAFields && app.IngressClassEnabled(kubeClient)
	var ingParamsClient ingparamsclient.Interface
	if ingClassEnabled {
		ingParamsCRDMeta := ingparams.CRDMeta()
		if _, err := crdHandler.EnsureCRD(ingParamsCRDMeta, false); err != nil {
			klog.Fatalf("Failed to ensure GCPIngressParams CRD: %v", err)
		}

		if ingParamsClient, err = ingparamsclient.NewForConfig(kubeConfig); err != nil {
			klog.Fatalf("Failed to create GCPIngressParams client: %v", err)
		}
	}

	// The namer generates names for GCE resources, scoped by the cluster name/UID.
	namer, err := app.NewNamer(kubeClient, flags.F.ClusterName, firewalls.DefaultFirewallName)
	if err != nil {
		klog.Fatalf("app.NewNamer(ctx.KubeClient, %q, %q) = %v", flags.F.ClusterName, firewalls.DefaultFirewallName, err)
	}
	if namer.UID() != "" {
		klog.V(0).Infof("Cluster name: %+v", namer.UID())
	}

	// Get kube-system UID that will be used for v2 frontend naming scheme.
	kubeSystemNS, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), "kube-system", metav1.GetOptions{})
	if err != nil {
		klog.Fatalf("Error getting kube-system namespace: %v", err)
	}
	kubeSystemUID := kubeSystemNS.GetUID()

	cloud := app.NewGCEClient()
	defaultBackendServicePort := app.DefaultBackendServicePort(kubeClient)
	ctxConfig := ingctx.ControllerContextConfig{
		Namespace:             flags.F.WatchNamespace,
		ResyncPeriod:          flags.F.ResyncPeriod,
		NumL4Workers:          flags.F.NumL4Workers,
		DefaultBackendSvcPort: defaultBackendServicePort,
		HealthCheckPath:       flags.F.HealthCheckPath,
		FrontendConfigEnabled: flags.F.EnableFrontendConfig,
		EnableASMConfigMap:    flags.F.EnableASMConfigMapBasedConfig,
		ASMConfigMapNamespace: flags.F.ASMConfigMapBasedConfigNamespace,
		ASMConfigMapName:      flags.F.ASMConfigMapBasedConfigCMName,
		EndpointSlicesEnabled: flags.F.EnableEndpointSlices,
	}
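	// Bundle the clients, cloud handle, namer, and kube-system UID into the
	// controller context shared by everything started below.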
	ctx := ingctx.NewControllerContext(kubeConfig, kubeClient, backendConfigClient, frontendConfigClient, svcNegClient, ingParamsClient, svcAttachmentClient, cloud, namer, kubeSystemUID, ctxConfig)
	// Serve the controller's health check endpoint in the background.
	go app.RunHTTPServer(ctx.HealthCheck)

	// Without leader election, run the controllers directly in this process.
	if !flags.F.LeaderElection.LeaderElect {
		runControllers(ctx)
		return
	}

	// With leader election, the controllers run only while this replica holds the
	// lock; RunOrDie blocks until leadership is lost or the process exits.
	electionConfig, err := makeLeaderElectionConfig(ctx, leaderElectKubeClient, ctx.Recorder(flags.F.LeaderElection.LockObjectNamespace))
	if err != nil {
		klog.Fatalf("%v", err)
	}
	leaderelection.RunOrDie(context.Background(), *electionConfig)
	klog.Warning("Ingress Controller exited.")
}
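
makeLeaderElectionConfig and runControllers are defined later in this file, outside the excerpt above. As a rough sketch of the shape such a leader-election config typically takes with client-go (the lock type, namespace, name, and timings below are illustrative assumptions, not the controller's actual values):

import (
	"context"
	"os"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
)

// exampleLeaderElectionConfig is an illustrative sketch, not the controller's
// real makeLeaderElectionConfig: it builds a Lease-based lock and wires the
// run callback so the controllers only run while this replica leads.
func exampleLeaderElectionConfig(client kubernetes.Interface, recorder record.EventRecorder, run func(context.Context)) (*leaderelection.LeaderElectionConfig, error) {
	id, err := os.Hostname()
	if err != nil {
		return nil, err
	}
	// The namespace and name of the lock object are hypothetical values.
	lock, err := resourcelock.New(
		resourcelock.LeasesResourceLock,
		"kube-system",
		"example-ingress-lock",
		client.CoreV1(),
		client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id, EventRecorder: recorder},
	)
	if err != nil {
		return nil, err
	}
	return &leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run, // e.g. start the controllers
			OnStoppedLeading: func() {
				klog.Fatalf("Lost leader election, exiting.")
			},
		},
	}, nil
}

Passed to leaderelection.RunOrDie as in main above, such a config blocks the caller while the client renews its lease, and invokes OnStoppedLeading once leadership is lost.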