func main()

in gcp/dam/main.go [98:212]


func main() {
	flag.Parse()
	ctx := context.Background()

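	// Record the service identity (project, type, name).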
	serviceinfo.Project = project
	serviceinfo.Type = "dam"
	serviceinfo.Name = srvName

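	// gRPC client for the Stackdriver Logging service; passed to DAM as SDLC below.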
	sdlcc := grpcutil.NewGRPCClient(ctx, sdlAddr)
	defer sdlcc.Close()
	sdlc := lgrpcpb.NewLoggingServiceV2Client(sdlcc)

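	// Select the storage backend: Cloud Datastore or in-memory.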
	var store storage.Store
	switch storageType {
	case "datastore":
		store = dsstore.NewStore(ctx, project, srvName, cfgPath)
	case "memory":
		store = storage.NewMemoryStorage(srvName, cfgPath)
		// Import and resolve template variables, if any.
		if err := dam.ImportConfig(store, srvName, nil, cfgVars, true, true, true); err != nil {
			glog.Exitf("dam.ImportConfig(_, %q, _) failed: %v", srvName, err)
		}
	default:
		glog.Exitf("Unknown storage type %q", storageType)
	}

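	// Service account warehouse, used below as both Warehouse and ServiceAccountManager.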
	wh := saw.MustNew(ctx, store)

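	// Cloud KMS client plus the KMS-backed signer and encrypter for this service's key rings.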
	kmsClient, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		glog.Exitf("kms.NewKeyManagementClient(ctx) failed: %v", err)
	}
	gcpSigner, err := gcpsign.New(ctx, project, "global", srvName+"_sign_ring", srvName+"_key", kmsClient)
	if err != nil {
		glog.Exitf("gcpsign.New(ctx, %q, %q, %q, %q, kmsClient) failed: %v", project, "global", srvName+"_sign_ring", srvName+"_key", err)
	}
	gcpEncryption, err := gcpcrypt.New(ctx, project, "global", srvName+"_ring", srvName+"_key", kmsClient)
	if err != nil {
		glog.Exitf("gcpcrypt.New(ctx, %q, %q, %q, %q, kmsClient) failed: %v", project, "global", srvName+"_ring", srvName+"_key", err)
	}

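	// Stackdriver Logging client; delivery errors are reported as warnings only.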
	logger, err := logging.NewClient(ctx, project)
	if err != nil {
		glog.Fatalf("logging.NewClient() failed: %v", err)
	}
	logger.OnError = func(err error) {
		glog.Warningf("StackdriverLogging.Client.OnError: %v", err)
	}

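	// When Hydra is enabled, read its admin and public URLs from the environment and proxy the internal public endpoint.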
	var hyproxy *hydraproxy.Service
	if useHydra {
		hydraAdminAddr = osenv.MustVar("HYDRA_ADMIN_URL")
		hydraPublicAddr = osenv.MustVar("HYDRA_PUBLIC_URL")
		hydraPublicAddrInternal := osenv.MustVar("HYDRA_PUBLIC_URL_INTERNAL")

		hyproxy, err = hydraproxy.New(http.DefaultClient, hydraAdminAddr, hydraPublicAddrInternal, store)
		if err != nil {
			glog.Exitf("hydraproxy.New failed: %v", err)
		}
	}

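	// AWS API client, created only when the AWS adapter is enabled.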
	var awsClient aws.APIClient
	if globalflags.EnableAWSAdapter {
		awsClient, err = aws.NewAPIClient()
		if err != nil {
			glog.Exitf("aws.NewAPIClient failed: %v", err)
		}
	}

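	// Long-running operation (LRO) handling, backed by the same store.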
	lros, err := lro.New("lro", 60*time.Second, 60*time.Second, store, nil)
	if err != nil {
		glog.Exitf("lro.New failed: %v", err)
	}

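	// Build the DAM service and register its handlers on the router.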
	r := mux.NewRouter()

	s := dam.New(r, &dam.Options{
		Domain:                     srvAddr,
		ServiceName:                srvName,
		DefaultBroker:              defaultBroker,
		Store:                      store,
		Warehouse:                  wh,
		AWSClient:                  awsClient,
		ServiceAccountManager:      wh,
		Logger:                     logger,
		SDLC:                       sdlc,
		AuditLogProject:            project,
		HidePolicyBasis:            hidePolicyBasis,
		HideRejectDetail:           hideRejectDetail,
		SkipInformationReleasePage: skipInformationReleasePage,
		ConsentDashboardURL:        consentDashboardURL,
		UseHydra:                   useHydra,
		HydraAdminURL:              hydraAdminAddr,
		HydraPublicURL:             hydraPublicAddr,
		HydraPublicProxy:           hyproxy,
		Signer:                     gcpSigner,
		Encryption:                 gcpEncryption,
		LRO:                        lros,
	})

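	// Liveness check endpoint for health probes.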
	r.HandleFunc("/liveness_check", httputils.LivenessCheckHandler)

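	// Serve without blocking so the interrupt signal can be handled below.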
	srv := server.New("dam", port, s.Handler)
	srv.ServeUnblock()

	c := make(chan os.Signal, 1)
	// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C).
	// SIGKILL, SIGQUIT (Ctrl+\) and SIGTERM will not be caught.
	signal.Notify(c, os.Interrupt)

	// Block until we receive our signal.
	<-c

	srv.Shutdown()
}