func main()

in go/cmd/ct-fetch/ct-fetch.go [1000:1127]


func main() {
	defer glog.Flush()

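	// Parse runtime configuration (flags and environment).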
	ctconfig.Init()

	ctx := context.Background()
	ctx, cancelMain := context.WithCancel(ctx)

	// Try to handle SIGINT and SIGTERM gracefully
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	defer close(sigChan)
	go func() {
		sig := <-sigChan
		glog.Infof("Signal caught: %s..", sig)
		cancelMain()
		signal.Stop(sigChan) // Restore default behavior
	}()

	// Seed random for clock jitter
	rand.Seed(time.Now().UnixNano())

	storageDB, _ := engine.GetConfiguredStorage(ctx, ctconfig, true)
	if err := storageDB.EnsureCacheIsConsistent(); err != nil {
		glog.Errorf("Could not recover cache: %s", err)
		os.Exit(1)
	}

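	// Set up telemetry reporting for this process.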
	engine.PrepareTelemetry("ct-fetch", ctconfig)

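	// enrolledLogs tracks which CT logs are enrolled for syncing and
	// announces newly enrolled logs on its NewChan channel.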
	enrolledLogs := NewEnrolledLogs()

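	// The sync engine coordinates per-log downloads and the worker pool
	// that parses and stores log entries.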
	syncEngine := NewLogSyncEngine(storageDB)

	// Start a pool of threads to parse and store log entries
	syncEngine.StartDatabaseThreads()

	// Sync with logs as they are enrolled
	go func() {
		for ctLog := range enrolledLogs.NewChan {
			glog.Infof("[%s] Starting download.", ctLog.URL)
			go syncEngine.SyncLog(ctx, enrolledLogs, ctLog)
		}
	}()

	// Enroll logs from local settings
	if *ctconfig.CTLogMetadata != "" {
		var localCTLogList []types.CTLogMetadata
		if err := json.Unmarshal([]byte(*ctconfig.CTLogMetadata), &localCTLogList); err != nil {
			glog.Fatalf("Unable to parse CTLogMetadata argument: %s", err)
		}

		for _, ctLog := range localCTLogList {
			if ctLog.CRLiteEnrolled {
				enrolledLogs.Enroll(ctLog)
			}
		}
	}

	if enrolledLogs.Count() == 0 && *ctconfig.RemoteSettingsURL == "" {
		// No logs are enrolled locally and there is no Remote Settings
		// source to find any, so there is nothing to sync: print usage
		// and exit.
		if *ctconfig.CTLogMetadata != "" {
			glog.Warningf("No enrolled logs found in CTLogMetadata: %s", *ctconfig.CTLogMetadata)
		}
		ctconfig.Usage()
		os.Exit(2)
	}

	// If we're configured with a Remote Settings URL, we'll periodically look for
	// newly enrolled logs in Remote Settings. Otherwise we have all of the logs already.
	if *ctconfig.RemoteSettingsURL != "" {
		go enrolledLogs.UpdateFromRemoteSettings(ctx)
	} else {
		enrolledLogs.Finalize()
	}

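	// Expose a /health endpoint that reports how recently the sync engine
	// made progress.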
	healthHandler := http.NewServeMux()
	healthHandler.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		approxUpdateTimestamp := syncEngine.ApproximateMostRecentUpdateTimestamp()

		if approxUpdateTimestamp.IsZero() {
			w.Header().Add("Retry-After", "30")
			w.WriteHeader(503)
			_, err := w.Write([]byte("error: no health updates yet, Retry-After 30 seconds"))
			if err != nil {
				glog.Warningf("Couldn't return too early health status: %+v", err)
			}
			return
		}

		duration := time.Since(approxUpdateTimestamp)
		evaluationTime := 2 * time.Duration(*ctconfig.PollingDelay) * time.Second
		if duration > evaluationTime {
			w.WriteHeader(500)
			_, err := fmt.Fprintf(w, "error: %v since last update, which is longer than 2 * pollingDelay", duration)
			if err != nil {
				glog.Warningf("Couldn't return poor health status: %+v", err)
			}
			return
		}

		w.WriteHeader(200)
		_, err := fmt.Fprintf(w, "ok: %v since last update, which is shorter than 2 * pollingDelay", duration)
		if err != nil {
			glog.Warningf("Couldn't return ok health status: %+v", err)
		}
	})

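	// Serve health checks on the configured address.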
	healthServer := &http.Server{
		Handler: healthHandler,
		Addr:    *ctconfig.HealthAddr,
	}
	go func() {
		if err := healthServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			glog.Errorf("Health server failed: %v", err)
		}
	}()

	// Wait until we've finalized enrollment.
	enrolledLogs.Wait()

	// Wait until all jobs are finished.
	syncEngine.Wait()

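	// Note: ctx may already be cancelled here (signal path), in which case
	// Shutdown returns promptly after closing the listener.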
	if err := healthServer.Shutdown(ctx); err != nil {
		glog.Infof("HTTP server shutdown error: %v", err)
	}
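	// os.Exit bypasses deferred calls, so flush logs explicitly first.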
	glog.Flush()

	os.Exit(0)
}