in pkg/controllers/status/available_controller.go [146:224]
func NewAvailableConditionController(
	apiServiceInformer informers.APIServiceInformer,
	serviceInformer v1informers.ServiceInformer,
	endpointsInformer v1informers.EndpointsInformer,
	apiServiceClient apiregistrationclient.APIServicesGetter,
	proxyTransport *http.Transport,
	proxyCurrentCertKeyContent certKeyFunc,
	serviceResolver ServiceResolver,
	egressSelector *egressselector.EgressSelector,
) (*AvailableConditionController, error) {
	c := &AvailableConditionController{
		apiServiceClient: apiServiceClient,
		apiServiceLister: apiServiceInformer.Lister(),
		apiServiceSynced: apiServiceInformer.Informer().HasSynced,
		serviceLister:    serviceInformer.Lister(),
		servicesSynced:   serviceInformer.Informer().HasSynced,
		endpointsLister:  endpointsInformer.Lister(),
		endpointsSynced:  endpointsInformer.Informer().HasSynced,
		serviceResolver:  serviceResolver,
		queue: workqueue.NewNamedRateLimitingQueue(
			// We want a fairly tight requeue time. The controller listens to the API, but because availability
			// also depends on the routability of the service network, an external, non-watchable factor can
			// break it without producing any event. A small base delay keeps the maximum disruption time low,
			// while the exponential backoff (5ms, doubling per failure, capped at 30s) still prevents hot loops.
			workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second),
			"AvailableConditionController"),
		proxyCurrentCertKeyContent: proxyCurrentCertKeyContent,
		tlsCache:                   &tlsTransportCache{transports: make(map[tlsCacheKey]http.RoundTripper)},
		metrics:                    newAvailabilityMetrics(),
	}
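
	// Choose how the controller dials aggregated API servers when probing availability:
	// prefer the egress selector's cluster network dialer when one is configured, and
	// otherwise fall back to the proxy transport's DialContext, if any.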
	if egressSelector != nil {
		networkContext := egressselector.Cluster.AsNetworkContext()
		var egressDialer utilnet.DialFunc
		egressDialer, err := egressSelector.Lookup(networkContext)
		if err != nil {
			return nil, err
		}
		c.dialContext = egressDialer
	} else if proxyTransport != nil && proxyTransport.DialContext != nil {
		c.dialContext = proxyTransport.DialContext
	}

	// Resync on this informer because it is low cardinality, and re-checking actual discovery
	// lets us detect lost availability more quickly when network connectivity to nodes has been
	// severed but the network still attempts to route there. See
	// https://github.com/openshift/origin/issues/17159#issuecomment-341798063
	apiServiceInformer.Informer().AddEventHandlerWithResyncPeriod(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    c.addAPIService,
			UpdateFunc: c.updateAPIService,
			DeleteFunc: c.deleteAPIService,
		},
		30*time.Second)
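
	// Service and Endpoints events need no resync; their handlers (defined elsewhere in this
	// file) requeue the APIServices that reference the changed object so availability is
	// re-evaluated promptly.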
	serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    c.addService,
		UpdateFunc: c.updateService,
		DeleteFunc: c.deleteService,
	})

	endpointsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    c.addEndpoints,
		UpdateFunc: c.updateEndpoints,
		DeleteFunc: c.deleteEndpoints,
	})
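
	// syncFn is an indirection point: keeping it as a field allows the sync behavior to be
	// substituted (e.g. by unit tests) without changing how the worker loop is driven.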
	c.syncFn = c.sync

	// TODO: decouple from legacyregistry
	var err error
	registerIntoLegacyRegistryOnce.Do(func() {
		err = c.metrics.Register(legacyregistry.Register, legacyregistry.CustomRegister)
	})
	if err != nil {
		return nil, err
	}

	return c, nil
}
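
// Illustrative wiring sketch (not part of the upstream file; the function and variable names are
// hypothetical). It shows how a caller might construct the controller from shared informer
// factories and then start it; the real wiring lives in the aggregator's server setup code, and
// Run's (workers, stopCh) signature is assumed from this version of the controller.
//
//	func startAvailableConditionController(
//		apiServiceInformer informers.APIServiceInformer,
//		kubeInformers kubeinformers.SharedInformerFactory, // assumed: a k8s.io/client-go/informers factory
//		apiServiceClient apiregistrationclient.APIServicesGetter,
//		proxyTransport *http.Transport,
//		proxyCerts certKeyFunc,
//		resolver ServiceResolver,
//		egressSelector *egressselector.EgressSelector,
//		stopCh <-chan struct{},
//	) error {
//		controller, err := NewAvailableConditionController(
//			apiServiceInformer,
//			kubeInformers.Core().V1().Services(),
//			kubeInformers.Core().V1().Endpoints(),
//			apiServiceClient,
//			proxyTransport,
//			proxyCerts,
//			resolver,
//			egressSelector,
//		)
//		if err != nil {
//			return err
//		}
//		// Start the controller with a handful of workers; informer factories must be
//		// started separately by the caller.
//		go controller.Run(5, stopCh)
//		return nil
//	}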