in pkg/exporter/exporter.go [48:118]
// Run creates the exported ConfigMap, then keeps it fresh until ctx is
// canceled or stopChan is closed. Freshness is maintained two ways:
//   - a shared informer watches for external deletion of the ConfigMap and
//     recreates it via RefreshData;
//   - every patrolInterval the ConfigMap is deleted on purpose, which routes
//     through the same informer delete event and forces a re-creation with
//     current data.
func (e *Exporter) Run(ctx context.Context, configMapName, region string, patrolInterval time.Duration, stopChan <-chan struct{}) {
	// Create the ConfigMap up front so consumers have data immediately.
	err := e.RefreshData(ctx, configMapName, region)
	if err != nil {
		return
	}
	informerFactory := informers.NewSharedInformerFactory(e.clusterClient, time.Hour*1)
	configMapInformer := informerFactory.Core().V1().ConfigMaps().Informer()
	// Channel used by the informer callback to hand delete events to the
	// loop below. It is intentionally never closed: the informer goroutine's
	// lifetime is tied to stopChan, not to this function, and closing a
	// channel a sender may still use would panic that sender.
	eventChan := make(chan interface{})
	configMapInformer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			DeleteFunc: func(obj interface{}) {
				cm, ok := obj.(*corev1.ConfigMap)
				if !ok {
					return
				}
				if cm.ObjectMeta.Name != configMapName {
					return
				}
				// Guard the send so the informer worker cannot block
				// forever if Run has already returned and nobody is
				// receiving on eventChan anymore.
				select {
				case eventChan <- obj:
				case <-stopChan:
				}
			},
		})
	go configMapInformer.Run(stopChan)
	// Wait for the informer's local cache to populate before trusting events.
	if !cache.WaitForCacheSync(stopChan, configMapInformer.HasSynced) {
		runtime.HandleError(fmt.Errorf("timed out waiting for cache to sync"))
		return
	}
	refreshPatrol := time.NewTicker(patrolInterval)
	defer refreshPatrol.Stop()
	for {
		select {
		case <-eventChan:
			// The ConfigMap was deleted (externally or by the patrol
			// below): recreate it with fresh data.
			err := e.RefreshData(ctx, configMapName, region)
			if err != nil {
				return
			}
			e.recorder.Eventf(&corev1.ObjectReference{
				Kind:      "Pod",
				Namespace: client.Namespace,
				Name:      client.PodName,
			}, corev1.EventTypeWarning, "Configmap Deleted", "Configmap got deleted")
		// Refresh interval elapsed: force a refresh by deleting the
		// ConfigMap; the resulting informer delete event triggers the
		// actual re-creation in the case above.
		case <-refreshPatrol.C:
			err := e.DeleteConfigMap(ctx, configMapName)
			if err != nil && !apierrors.IsNotFound(err) {
				// break exits only this select (not the for loop):
				// skip the event and retry on the next tick.
				break
			}
			e.recorder.Eventf(&corev1.ObjectReference{
				Kind:      "Pod",
				Namespace: client.Namespace,
				Name:      client.PodName,
			}, corev1.EventTypeNormal, "Configmap updated", "Configmap got updated")
		case <-ctx.Done():
			// Fix: this case previously had an empty body. select cases do
			// not fall through in Go, so a canceled context left the loop
			// spinning forever (a closed Done channel is always ready).
			return
		case <-stopChan:
			return
		}
	}
}