in pkg/lease/lease.go [129:198]
func New(
	logger log.Logger,
	metrics prometheus.Registerer,
	lock resourcelock.Interface,
	opts *Options,
) (*Lease, error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	if opts == nil {
		opts = &Options{}
	}
	// Fall back to default timings for any options left unset.
	if opts.LeaseDuration == 0 {
		opts.LeaseDuration = 15 * time.Second
	}
	if opts.RetryPeriod == 0 {
		opts.RetryPeriod = 2 * time.Second
	}
	if opts.RenewDeadline == 0 {
		opts.RenewDeadline = 10 * time.Second
	}
	// Register the lease metrics against the provided registry, if any.
	if metrics != nil {
		if err := metrics.Register(leaseHolder); err != nil {
			return nil, err
		}
		if err := metrics.Register(leaseFailingOpen); err != nil {
			return nil, err
		}
	}
	// Initialize both gauges for this lock so their series exist with value 0
	// before leadership is first acquired.
	leaseHolder.WithLabelValues(lock.Describe()).Set(0)
	leaseFailingOpen.WithLabelValues(lock.Describe()).Set(0)
	wlock := newWrappedLock(lock)
	lease := &Lease{
		logger:         logger,
		lock:           wlock,
		onLeaderChange: func() {},
		opts:           *opts,
	}
	var err error
	// We use the Kubernetes client-go leader election implementation to drive the lease logic.
	// The lock itself, however, may be implemented against any consistent backend.
	lease.elector, err = leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock:          wlock,
		LeaseDuration: opts.LeaseDuration,
		RetryPeriod:   opts.RetryPeriod,
		RenewDeadline: opts.RenewDeadline,
		// The purpose of our lease is to determine the time ranges for which a leader sends
		// sample data. We cannot be certain that we have not already sent data for a later
		// in-range timestamp, so releasing the lease on cancel could produce overlaps.
		ReleaseOnCancel: false,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(context.Context) {
				lease.onLeaderChange()
				leaseHolder.WithLabelValues(lock.Describe()).Set(1)
			},
			OnStoppedLeading: func() {
				lease.onLeaderChange()
				leaseHolder.WithLabelValues(lock.Describe()).Set(0)
			},
		},
	})
	if err != nil {
		return nil, err
	}
	return lease, nil
}
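
A minimal usage sketch for the constructor above, assuming the logger is go-kit's log.Logger and the lock is client-go's standard Lease-based resourcelock. The module import path, namespace, lock name, and identity are placeholders; only New and the Options fields are taken from the code above. Any other implementation of resourcelock.Interface (Get, Create, Update, RecordEvent, Identity, Describe) could be passed as the lock instead.

// Usage sketch (not part of pkg/lease/lease.go); values below are illustrative.
package main

import (
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection/resourcelock"

	"example.com/yourmodule/pkg/lease" // placeholder for this repository's module path
)

func main() {
	// Build an in-cluster Kubernetes client; any other way of constructing a clientset works too.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// A coordination.k8s.io/v1 Lease object serves as the lock backend. Any type
	// implementing resourcelock.Interface could be substituted here.
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Namespace: "monitoring", // illustrative namespace
			Name:      "collector",  // illustrative lock name
		},
		Client: clientset.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: "replica-0", // must be unique per participating replica
		},
	}

	// Zero-valued Options fields fall back to the defaults applied in New
	// (15s lease duration, 2s retry period, 10s renew deadline).
	l, err := lease.New(log.NewNopLogger(), prometheus.DefaultRegisterer, lock, &lease.Options{
		LeaseDuration: 30 * time.Second,
		RenewDeadline: 20 * time.Second,
		RetryPeriod:   5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	_ = l // Driving the lease's run loop is outside the scope of this excerpt.
}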