in pkg/export/export.go [377:437]
func New(ctx context.Context, logger log.Logger, reg prometheus.Registerer, opts ExporterOpts, lease Lease) (*Exporter, error) {
	grpc_prometheus.EnableClientHandlingTimeHistogram(
		grpc_prometheus.WithHistogramBuckets([]float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 30, 40, 50, 60}),
	)
	if logger == nil {
		logger = log.NewNopLogger()
	}
	// Register the exporter's own operational metrics when a registerer is provided.
	if reg != nil {
		reg.MustRegister(
			prometheusSamplesDiscarded,
			samplesExported,
			samplesDropped,
			samplesSent,
			samplesSendErrors,
			sendIterations,
			shardProcess,
			shardProcessPending,
			shardProcessSamplesTaken,
			pendingRequests,
			projectsPerBatch,
			samplesPerRPCBatch,
		)
	}
	if err := opts.Validate(); err != nil {
		return nil, err
	}
	// Fall back to a no-op lease when none is provided.
	if lease == nil {
		lease = NopLease()
	}
	metricClient, err := defaultNewMetricClient(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("create metric client: %w", err)
	}
	e := &Exporter{
		logger:               logger,
		ctx:                  ctx,
		opts:                 opts,
		metricClient:         metricClient,
		seriesCache:          newSeriesCache(logger, reg, opts.MetricTypePrefix, opts.Matchers),
		externalLabels:       createLabelSet(&config.Config{}, &opts),
		newMetricClient:      defaultNewMetricClient,
		nextc:                make(chan struct{}, 1),
		shards:               make([]*shard, opts.Efficiency.ShardCount),
		warnedUntypedMetrics: map[string]struct{}{},
		lease:                lease,
	}
	// Whenever the lease is lost, clear the series cache so we don't start from out-of-range
	// reset timestamps when we gain the lease again.
	lease.OnLeaderChange(e.seriesCache.clear)
	// Initialize each shard with the configured buffer size.
	for i := range e.shards {
		e.shards[i] = newShard(opts.Efficiency.ShardBufferSize)
	}
	return e, nil
}
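
For context, a minimal caller sketch (not part of the original file) that relies only on behavior visible above: a nil logger and nil lease are accepted, with New substituting a no-op logger and NopLease() internally, and a nil registerer simply skips self-metric registration. The helper name, its placement inside the export package, and how ExporterOpts gets populated are assumptions for illustration.

// newDefaultExporter is a hypothetical in-package helper showing one way New might be called.
func newDefaultExporter(ctx context.Context, opts ExporterOpts) (*Exporter, error) {
	// prometheus.DefaultRegisterer exposes the exporter's self-metrics; passing nil
	// would skip registration, as seen in New above.
	e, err := New(ctx, nil, prometheus.DefaultRegisterer, opts, nil)
	if err != nil {
		return nil, fmt.Errorf("create exporter: %w", err)
	}
	return e, nil
}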