func startOnlineGC()

in registry/handlers/app.go [724:808]


func startOnlineGC(ctx context.Context, db *datastore.DB, storageDriver storagedriver.StorageDriver, config *configuration.Configuration) {
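	// bail out unless the metadata database is enabled, GC as a whole is on,
	// and at least one of the blob/manifest workers is enabled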
	if !config.Database.Enabled || config.GC.Disabled || (config.GC.Blobs.Disabled && config.GC.Manifests.Disabled) {
		return
	}

	l := dlog.GetLogger(dlog.WithContext(ctx))

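	// base options shared by every agent, built from the global GC settings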
	aOpts := []gc.AgentOption{
		gc.WithLogger(l),
	}
	if config.GC.NoIdleBackoff {
		aOpts = append(aOpts, gc.WithoutIdleBackoff())
	}
	if config.GC.MaxBackoff > 0 {
		aOpts = append(aOpts, gc.WithMaxBackoff(config.GC.MaxBackoff))
	}
	if config.GC.ErrorCooldownPeriod > 0 {
		aOpts = append(aOpts, gc.WithErrorCooldown(config.GC.ErrorCooldownPeriod))
	}

	var agents []*gc.Agent

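	// blob GC: build the worker and wrap it in its own agent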
	if !config.GC.Blobs.Disabled {
		bwOpts := []worker.BlobWorkerOption{
			worker.WithBlobLogger(l),
		}
		if config.GC.TransactionTimeout > 0 {
			bwOpts = append(bwOpts, worker.WithBlobTxTimeout(config.GC.TransactionTimeout))
		}
		if config.GC.Blobs.StorageTimeout > 0 {
			bwOpts = append(bwOpts, worker.WithBlobStorageTimeout(config.GC.Blobs.StorageTimeout))
		}
		bw := worker.NewBlobWorker(db, storageDriver, bwOpts...)

		// clone the shared options so this append cannot write into a backing
		// array that the manifest agent's options also build on
		baOpts := append([]gc.AgentOption(nil), aOpts...)
		if config.GC.Blobs.Interval > 0 {
			baOpts = append(baOpts, gc.WithInitialInterval(config.GC.Blobs.Interval))
		}
		ba := gc.NewAgent(bw, baOpts...)
		agents = append(agents, ba)
	}

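	// manifest GC: same shape as the blob setup, minus the storage timeout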
	if !config.GC.Manifests.Disabled {
		mwOpts := []worker.ManifestWorkerOption{
			worker.WithManifestLogger(l),
		}
		if config.GC.TransactionTimeout > 0 {
			mwOpts = append(mwOpts, worker.WithManifestTxTimeout(config.GC.TransactionTimeout))
		}
		mw := worker.NewManifestWorker(db, mwOpts...)

		// clone for the same reason as baOpts above
		maOpts := append([]gc.AgentOption(nil), aOpts...)
		if config.GC.Manifests.Interval > 0 {
			maOpts = append(maOpts, gc.WithInitialInterval(config.GC.Manifests.Interval))
		}
		ma := gc.NewAgent(mw, maOpts...)
		agents = append(agents, ma)
	}

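	// each agent runs in its own goroutine until the app context is canceled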
	for _, a := range agents {
		go func(a *gc.Agent) {
			// This function can only end in two situations: panic or context cancellation. If a panic occurs, we should
			// log, report to Sentry and then re-panic, as the instance would be in an inconsistent/unknown state. In
			// case of context cancellation, the app is shutting down, so there is nothing to worry about.
			defer func() {
				if err := recover(); err != nil {
					l.WithFields(dlog.Fields{"error": err}).Error("online GC agent stopped with panic")
					sentry.CurrentHub().Recover(err)
					sentry.Flush(5 * time.Second)
					panic(err)
				}
			}()
			if err := a.Start(ctx); err != nil {
				if errors.Is(err, context.Canceled) {
					// leaving this here for now for additional confidence and improved observability
					l.Warn("shutting down online GC agent due to context cancellation")
				} else {
					// this should never happen, but leaving it here to future-proof against bugs within Agent.Start
					errortracking.Capture(fmt.Errorf("online GC agent stopped with error: %w", err), errortracking.WithStackTrace())
					l.WithError(err).Error("online GC agent stopped")
				}
			}
		}(a)
	}
}
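
For illustration, a minimal sketch (not taken from the source) of a configuration that exercises every branch above. The field names mirror the accesses in startOnlineGC; the exact field types (bool, time.Duration) and the literal values are assumptions.

cfg := &configuration.Configuration{}
cfg.Database.Enabled = true                   // without the metadata database, startOnlineGC is a no-op
cfg.GC.Disabled = false
cfg.GC.NoIdleBackoff = false                  // keep backing off while there is no work
cfg.GC.MaxBackoff = 24 * time.Hour            // assumed duration: cap on the idle backoff
cfg.GC.ErrorCooldownPeriod = 30 * time.Minute // assumed duration
cfg.GC.TransactionTimeout = 10 * time.Second  // shared by blob and manifest workers
cfg.GC.Blobs.Disabled = false
cfg.GC.Blobs.Interval = 5 * time.Second
cfg.GC.Blobs.StorageTimeout = 2 * time.Second
cfg.GC.Manifests.Disabled = false
cfg.GC.Manifests.Interval = 5 * time.Second

startOnlineGC(ctx, db, storageDriver, cfg)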