in internal/testhelper/testserver/gitaly.go [319:529]
// createDependencies fills in a test-suitable default for every dependency on
// gsd that the caller left nil, then bundles everything into a
// *service.Dependencies for spinning up a Gitaly test server.
//
// Callers may pre-populate any field on gsd to override the default; only nil
// fields are replaced. Resources constructed here (catfile cache, pack-objects
// cache, DB manager, node manager) register their teardown via tb.Cleanup.
func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, ctx context.Context, cfg config.Cfg) *service.Dependencies {
	if gsd.logger == nil {
		gsd.logger = testhelper.NewLogger(tb, testhelper.WithLoggerName("gitaly"))
	}
	if gsd.conns == nil {
		gsd.conns = client.NewPool(client.WithDialOptions(client.UnaryInterceptor(), client.StreamInterceptor()))
	}
	if gsd.locator == nil {
		gsd.locator = config.NewLocator(cfg)
	}
	if gsd.gitlabClient == nil {
		gsd.gitlabClient = gitlab.NewMockClient(
			tb, gitlab.MockAllowed, gitlab.MockPreReceive, gitlab.MockPostReceive,
		)
	}
	if gsd.backchannelReg == nil {
		gsd.backchannelReg = backchannel.NewRegistry()
	}
	if gsd.txMgr == nil {
		gsd.txMgr = transaction.NewManager(cfg, gsd.logger, gsd.backchannelReg)
	}
	if gsd.gitCmdFactory == nil {
		gsd.gitCmdFactory = gittest.NewCommandFactory(tb, cfg)
	}
	if gsd.transactionRegistry == nil {
		gsd.transactionRegistry = storagemgr.NewTransactionRegistry()
	}
	if gsd.procReceiveRegistry == nil {
		gsd.procReceiveRegistry = hook.NewProcReceiveRegistry()
	}
	// The catfile cache must be ready before the WAL setup below: the
	// partition factory's repo factory captures gsd.catfileCache, so
	// initializing it any later would hand that factory a nil cache.
	if gsd.catfileCache == nil {
		cache := catfile.NewCache(cfg)
		gsd.catfileCache = cache
		tb.Cleanup(cache.Stop)
	}

	migrations := []migration.Migration{}
	if gsd.MigrationStateManager == nil {
		gsd.MigrationStateManager = migration.NewStateManager(migrations)
	}

	// node stays nil unless WAL is enabled; downstream constructors
	// (hook manager, housekeeping manager, bundle-URI manager) receive it
	// as-is either way.
	var node storage.Node
	if testhelper.IsWALEnabled() {
		dbMgr, err := databasemgr.NewDBManager(
			ctx,
			cfg.Storages,
			keyvalue.NewBadgerStore,
			helper.NewNullTickerFactory(),
			gsd.logger,
		)
		require.NoError(tb, err)
		tb.Cleanup(dbMgr.Close)

		var raftFactory raftmgr.RaftReplicaFactory
		var raftNode *raftmgr.Node
		if testhelper.IsRaftEnabled() && !testhelper.IsPraefectEnabled() {
			cfg.Raft = config.DefaultRaftConfig(uuid.New().String())
			// Speed up initial election overhead in the test setup.
			cfg.Raft.ElectionTicks = 5
			cfg.Raft.RTTMilliseconds = 100
			cfg.Raft.SnapshotDir = testhelper.TempDir(tb)

			raftNode, err = raftmgr.NewNode(cfg, gsd.logger, dbMgr, gsd.conns)
			require.NoError(tb, err)
			raftFactory = raftmgr.DefaultFactoryWithNode(cfg.Raft, raftNode)
		}

		partitionFactoryOptions := []partition.FactoryOption{
			partition.WithCmdFactory(gsd.gitCmdFactory),
			partition.WithRepoFactory(localrepo.NewFactory(gsd.logger, gsd.locator, gsd.gitCmdFactory, gsd.catfileCache)),
			partition.WithMetrics(partition.NewMetrics(housekeeping.NewMetrics(cfg.Prometheus))),
			partition.WithRaftConfig(cfg.Raft),
			partition.WithRaftFactory(raftFactory),
		}

		nodeMgr, err := nodeimpl.NewManager(
			cfg.Storages,
			storagemgr.NewFactory(
				gsd.logger,
				dbMgr,
				migration.NewFactory(
					partition.NewFactory(partitionFactoryOptions...),
					migration.NewMetrics(),
					migrations,
				),
				storagemgr.DefaultMaxInactivePartitions,
				storagemgr.NewMetrics(cfg.Prometheus),
			),
		)
		require.NoError(tb, err)
		tb.Cleanup(nodeMgr.Close)

		if testhelper.IsRaftEnabled() && !testhelper.IsPraefectEnabled() {
			// Wire each storage's base implementation into the Raft
			// node, which then fronts them as the storage.Node.
			for _, storageCfg := range cfg.Storages {
				baseStorage, err := nodeMgr.GetStorage(storageCfg.Name)
				require.NoError(tb, err)
				require.NoError(tb, raftNode.SetBaseStorage(storageCfg.Name, baseStorage))
			}
			node = raftNode
		} else {
			node = nodeMgr
		}
	}

	// This is to allow building a bundle generation from a Sink
	// without having to create one beforehand.
	// If bundleURIManager is defined though, we use that one.
	if gsd.bundleURIManager == nil && gsd.bundleURISink != nil {
		var strategy bundleuri.GenerationStrategy
		if gsd.bundleURIStrategy != nil {
			strategy = gsd.bundleURIStrategy
		} else {
			strategy = bundleuri.NewSimpleStrategy(true)
		}

		manager, err := bundleuri.NewGenerationManager(ctx, gsd.bundleURISink, gsd.logger, node, strategy)
		require.NoError(tb, err)
		gsd.bundleURIManager = manager
	}

	if gsd.hookMgr == nil {
		gsd.hookMgr = hook.NewManager(
			cfg, gsd.locator,
			gsd.logger,
			gsd.gitCmdFactory,
			gsd.txMgr,
			gsd.gitlabClient,
			hook.NewTransactionRegistry(gsd.transactionRegistry),
			gsd.procReceiveRegistry,
			node,
		)
	}
	if gsd.diskCache == nil {
		gsd.diskCache = cache.New(cfg, gsd.locator, gsd.logger)
	}
	if gsd.packObjectsCache == nil {
		gsd.packObjectsCache = streamcache.New(cfg.PackObjectsCache, gsd.logger)
		tb.Cleanup(gsd.packObjectsCache.Stop)
	}
	if gsd.packObjectsLimiter == nil {
		gsd.packObjectsLimiter = limiter.NewConcurrencyLimiter(
			limiter.NewAdaptiveLimit("staticLimit", limiter.AdaptiveSetting{Initial: 0}),
			0,
			0,
			limiter.NewNoopConcurrencyMonitor(),
		)
	}
	if gsd.limitHandler == nil {
		_, setupPerRPCConcurrencyLimiters := limithandler.WithConcurrencyLimiters(cfg)
		gsd.limitHandler = limithandler.New(cfg, limithandler.LimitConcurrencyByRepo, setupPerRPCConcurrencyLimiters)
	}
	if gsd.repositoryCounter == nil {
		gsd.repositoryCounter = counter.NewRepositoryCounter(cfg.Storages)
	}
	if gsd.updaterWithHooks == nil {
		gsd.updaterWithHooks = updateref.NewUpdaterWithHooks(cfg, gsd.logger, gsd.locator, gsd.hookMgr, gsd.gitCmdFactory, gsd.catfileCache)
	}
	if gsd.housekeepingManager == nil {
		gsd.housekeepingManager = housekeepingmgr.New(cfg.Prometheus, gsd.logger, gsd.txMgr, node)
	}
	if gsd.signingKey != "" {
		cfg.Git.SigningKey = gsd.signingKey
	}

	// NOTE(review): unlike the fields above, localRepoFactory is set
	// unconditionally, overwriting any caller-provided value — this mirrors
	// the original behavior; confirm it is intentional.
	gsd.localRepoFactory = localrepo.NewFactory(gsd.logger, gsd.locator, gsd.gitCmdFactory, gsd.catfileCache)

	return &service.Dependencies{
		Logger:                 gsd.logger,
		Cfg:                    cfg,
		ClientPool:             gsd.conns,
		StorageLocator:         gsd.locator,
		TransactionManager:     gsd.txMgr,
		GitalyHookManager:      gsd.hookMgr,
		GitCmdFactory:          gsd.gitCmdFactory,
		BackchannelRegistry:    gsd.backchannelReg,
		GitlabClient:           gsd.gitlabClient,
		CatfileCache:           gsd.catfileCache,
		DiskCache:              gsd.diskCache,
		PackObjectsCache:       gsd.packObjectsCache,
		PackObjectsLimiter:     gsd.packObjectsLimiter,
		LimitHandler:           gsd.limitHandler,
		RepositoryCounter:      gsd.repositoryCounter,
		UpdaterWithHooks:       gsd.updaterWithHooks,
		HousekeepingManager:    gsd.housekeepingManager,
		TransactionRegistry:    gsd.transactionRegistry,
		Node:                   node,
		BackupSink:             gsd.backupSink,
		BackupLocator:          gsd.backupLocator,
		ProcReceiveRegistry:    gsd.procReceiveRegistry,
		BundleURIManager:       gsd.bundleURIManager,
		LocalRepositoryFactory: gsd.localRepoFactory,
		MigrationStateManager:  gsd.MigrationStateManager,
	}
}