in executors/kubernetes/kubernetes.go [3210:3272]
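// newExecutor builds a Kubernetes executor with its default configuration and
// wires up the factory functions for the API client, pod watcher, and log processor.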
func newExecutor() *executor {
	e := &executor{
		AbstractExecutor: executors.AbstractExecutor{
			ExecutorOptions: executorOptions,
			Config: common.RunnerConfig{
				RunnerSettings: common.RunnerSettings{
					Kubernetes: &common.KubernetesConfig{},
				},
			},
		},
		remoteProcessTerminated: make(chan shells.StageCommandStatus),
		newKubeClient: func(config *restclient.Config) (kubernetes.Interface, error) {
			return kubernetes.NewForConfig(config)
		},
		getKubeConfig:        getKubeClientConfig,
		windowsKernelVersion: os_helpers.LocalKernelVersion,
	}
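
	// resourceCheckResult pairs the allow/deny outcome of a permission check with the
	// reason reported by the feature checker; it is only used by newPodWatcher below.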
	type resourceCheckResult struct {
		allowed bool
		reason  string
	}
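
	// newPodWatcher verifies that the configured service account can list and watch pods
	// in the target namespace before using the informer-backed watcher; otherwise it logs
	// a warning and falls back to a no-op watcher.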
	e.newPodWatcher = func(c podWatcherConfig) podWatcher {
		gvr := metav1.GroupVersionResource{Version: "v1", Resource: "pods"}
		docLink := "https://docs.gitlab.com/runner/executors/kubernetes/#informers"

		for _, verb := range []string{"list", "watch"} {
			res, err := retry.WithValueFn(c.retryProvider, func() (resourceCheckResult, error) {
				allowed, reason, err := c.featureChecker.IsResourceVerbAllowed(c.ctx, gvr, c.namespace, verb)
				return resourceCheckResult{allowed, reason}, err
			}).Run()
			if res.allowed && err == nil {
				continue
			}

			// The verb is not allowed or the check itself failed: report why and
			// disable informer-based pod watching for this build.
			reason := res.reason
			if err != nil {
				reason = err.Error()
			}
			c.logger.Warningln(fmt.Sprintf("won't use informers: %q, see: %s", reason, docLink))

			return watchers.NoopPodWatcher{}
		}

		return watchers.NewPodWatcher(c.ctx, c.logger, c.kubeClient, c.namespace, c.labels, c.maxSyncDuration)
	}
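
	// newLogProcessor constructs the processor that reads the helper container's log
	// file (e.logFile()) from the build pod, configured with a 1s–30s backoff.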
	e.newLogProcessor = func() logProcessor {
		return newKubernetesLogProcessor(
			e.kubeClient,
			e.kubeConfig,
			&backoff.Backoff{Min: time.Second, Max: 30 * time.Second},
			e.Build.Log(),
			kubernetesLogProcessorPodConfig{
				namespace:          e.pod.Namespace,
				pod:                e.pod.Name,
				container:          helperContainerName,
				logPath:            e.logFile(),
				waitLogFileTimeout: waitLogFileTimeout,
			},
		)
	}

	return e
}