in prow/cmd/deck/main.go [267:499]
func main() {
	logrusutil.ComponentInit()

	o := gatherOptions(flag.NewFlagSet(os.Args[0], flag.ExitOnError), os.Args[1:]...)
	if err := o.Validate(); err != nil {
		logrus.WithError(err).Fatal("Invalid options")
	}

	defer interrupts.WaitForGracefulShutdown()
	pprof.Instrument(o.instrumentation)

	// Set up the config agent, pod log clients, etc.
	configAgent, err := o.config.ConfigAgentWithAdditionals(&config.Agent{}, []func(*config.Config) error{spglassConfigDefaulting})
	if err != nil {
		logrus.WithError(err).Fatal("Error starting config agent.")
	}
	cfg := configAgent.Config

	var pluginAgent *plugins.ConfigAgent
	if o.pluginsConfig.PluginConfigPath != "" {
		pluginAgent, err = o.pluginsConfig.PluginAgent()
		if err != nil {
			logrus.WithError(err).Fatal("Error loading Prow plugin config.")
		}
	} else {
		logrus.Info("No plugins configuration was provided to deck. You must provide one to reuse /test checks for reruns.")
	}
	metrics.ExposeMetrics("deck", cfg().PushGateway, o.instrumentation.MetricsPort)

	// Signal to the world that we are healthy. This needs to live on a
	// separate port because we don't start the main server with the main
	// mux until we're ready.
	health := pjutil.NewHealthOnPort(o.instrumentation.HealthPort)

	mux := http.NewServeMux()
	// Set up common handlers for local and deployed runs.
	mux.Handle("/static/", http.StripPrefix("/static", staticHandlerFromDir(o.staticFilesLocation)))
	mux.Handle("/config", gziphandler.GzipHandler(handleConfig(cfg, logrus.WithField("handler", "/config"))))
	mux.Handle("/plugin-config", gziphandler.GzipHandler(handlePluginConfig(pluginAgent, logrus.WithField("handler", "/plugin-config"))))
	mux.Handle("/favicon.ico", gziphandler.GzipHandler(handleFavicon(o.staticFilesLocation, cfg)))

	// Set up handlers for template pages.
	mux.Handle("/pr", gziphandler.GzipHandler(handleSimpleTemplate(o, cfg, "pr.html", nil)))
	mux.Handle("/command-help", gziphandler.GzipHandler(handleSimpleTemplate(o, cfg, "command-help.html", nil)))
	mux.Handle("/plugin-help", http.RedirectHandler("/command-help", http.StatusMovedPermanently))
	mux.Handle("/tide", gziphandler.GzipHandler(handleSimpleTemplate(o, cfg, "tide.html", nil)))
	mux.Handle("/tide-history", gziphandler.GzipHandler(handleSimpleTemplate(o, cfg, "tide-history.html", nil)))
	mux.Handle("/plugins", gziphandler.GzipHandler(handleSimpleTemplate(o, cfg, "plugins.html", nil)))
	runLocal := o.pregeneratedData != ""

	var fallbackHandler func(http.ResponseWriter, *http.Request)
	var pjListingClient jobs.PJListingClient
	var githubClient deckGitHubClient
	var gitClient git.ClientFactory
	var podLogClients map[string]jobs.PodLogClient
	if runLocal {
		localDataHandler := staticHandlerFromDir(o.pregeneratedData)
		fallbackHandler = localDataHandler.ServeHTTP

		var fjc fakePjListingClientWrapper
		var pjs prowapi.ProwJobList
		staticPjsPath := path.Join(o.pregeneratedData, "prowjobs.json")
		content, err := ioutil.ReadFile(staticPjsPath)
		if err != nil {
			logrus.WithError(err).Fatal("Failed to read jobs from prowjobs.json.")
		}
		if err = json.Unmarshal(content, &pjs); err != nil {
			logrus.WithError(err).Fatal("Failed to unmarshal jobs from prowjobs.json.")
		}
		fjc.pjs = &pjs
		pjListingClient = &fjc
	} else {
		fallbackHandler = http.NotFound

		restCfg, err := o.kubernetes.InfrastructureClusterConfig(false)
		if err != nil {
			logrus.WithError(err).Fatal("Error getting infrastructure cluster config.")
		}
		mgr, err := manager.New(restCfg, manager.Options{
			Namespace:          cfg().ProwJobNamespace,
			MetricsBindAddress: "0",
			LeaderElection:     false,
		})
		if err != nil {
			logrus.WithError(err).Fatal("Error getting manager.")
		}

		// Force a cache for ProwJobs so the informer is registered before the
		// manager starts.
		if _, err := mgr.GetCache().GetInformer(interrupts.Context(), &prowapi.ProwJob{}); err != nil {
			logrus.WithError(err).Fatal("Failed to get prowjob informer")
		}
		go func() {
			if err := mgr.Start(interrupts.Context()); err != nil {
				logrus.WithError(err).Fatal("Error starting manager.")
			} else {
				logrus.Info("Manager stopped gracefully.")
			}
		}()

		mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), time.Duration(o.timeoutListingProwJobs)*time.Second)
		defer mgrSyncCtxCancel()
		if synced := mgr.GetCache().WaitForCacheSync(mgrSyncCtx); !synced {
			logrus.Fatal("Timed out waiting for cache sync")
		}
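		// Serving before the cache has synced would return an empty or
		// partial job list, so we block here, bounded by the
		// timeoutListingProwJobs option (interpreted as seconds above).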
		// The watch apimachinery doesn't support restarts, so just exit the
		// binary if a kubeconfig changes to make the kubelet restart us.
		if err := o.kubernetes.AddKubeconfigChangeCallback(func() {
			logrus.Info("Kubeconfig changed, exiting to trigger a restart")
			interrupts.Terminate()
		}); err != nil {
			logrus.WithError(err).Fatal("Failed to register kubeconfig change callback")
		}

		pjListingClient = &pjListingClientWrapper{mgr.GetClient()}

		// We use the GitHub client to resolve GitHub teams when determining
		// who is permitted to rerun a job. When inrepoconfig is enabled, both
		// the githubClient and the gitClient are used to resolve presubmits
		// dynamically, which we need for the PR history page.
		if o.github.TokenPath != "" || o.github.AppID != "" {
			githubClient, err = o.github.GitHubClient(o.dryRun)
			if err != nil {
				logrus.WithError(err).Fatal("Error getting GitHub client.")
			}
			g, err := o.github.GitClient(o.dryRun)
			if err != nil {
				logrus.WithError(err).Fatal("Error getting Git client.")
			}
			gitClient = git.ClientFactoryFrom(g)
		} else if len(cfg().InRepoConfig.Enabled) > 0 {
			logrus.Fatal("--github-token-path must be configured with a valid token when using the inrepoconfig feature")
		}

		buildClusterClients, err := o.kubernetes.BuildClusterClients(cfg().PodNamespace, false)
		if err != nil {
			logrus.WithError(err).Fatal("Error getting Kubernetes client.")
		}
		podLogClients = make(map[string]jobs.PodLogClient)
		for clusterContext, client := range buildClusterClients {
			podLogClients[clusterContext] = &podLogClient{client: client}
		}
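		// One PodLogClient per configured build cluster, keyed by kubeconfig
		// context, so log requests can be routed to whichever cluster
		// actually ran the job.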
	}

	authCfgGetter := func(refs *prowapi.Refs) *prowapi.RerunAuthConfig {
		rac := cfg().Deck.RerunAuthConfigs.GetRerunAuthConfig(refs)
		return &rac
	}
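	// GetRerunAuthConfig returns by value, so authCfgGetter hands each caller
	// a pointer to a fresh copy; the shared Deck config cannot be mutated
	// through it.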
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
fallbackHandler(w, r)
return
}
indexHandler := handleSimpleTemplate(o, cfg, "index.html", struct {
SpyglassEnabled bool
ReRunCreatesJob bool
}{
SpyglassEnabled: o.spyglass,
ReRunCreatesJob: o.rerunCreatesJob})
indexHandler(w, r)
})
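	// In a ServeMux the "/" pattern catches every path no other handler
	// claims: the exact root renders index.html, and everything else falls
	// through to fallbackHandler (pregenerated static data when running
	// locally, http.NotFound in production).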
	ja := jobs.NewJobAgent(context.Background(), pjListingClient, o.hiddenOnly, o.showHidden, o.tenantIDs.Strings(), podLogClients, cfg)
	ja.Start()
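	// ja aggregates ProwJobs and pod logs for the handlers registered below.
	// Start launches its background sync; see the jobs package for the exact
	// refresh behavior.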
	// Set up prod-only handlers. These handlers also work with runLocal as
	// long as ja is properly mocked, more specifically the pjListingClient
	// inside ja.
	mux.Handle("/data.js", gziphandler.GzipHandler(handleData(ja, logrus.WithField("handler", "/data.js"))))
	mux.Handle("/prowjobs.js", gziphandler.GzipHandler(handleProwJobs(ja, logrus.WithField("handler", "/prowjobs.js"))))
	mux.Handle("/badge.svg", gziphandler.GzipHandler(handleBadge(ja)))
	mux.Handle("/log", gziphandler.GzipHandler(handleLog(ja, logrus.WithField("handler", "/log"))))

	if o.spyglass {
		initSpyglass(cfg, o, mux, ja, githubClient, gitClient)
	}
	if runLocal {
		mux = localOnlyMain(cfg, o, mux)
	} else {
		mux = prodOnlyMain(cfg, pluginAgent, authCfgGetter, githubClient, o, mux)
	}

	// Signal to the world that we're ready.
	health.ServeReady()

	// The cookie secret is used for CSRF protection and should be exactly 32
	// bytes; we sometimes accept other lengths to stay backwards compatible.
	var csrfToken []byte
	if o.cookieSecretFile != "" {
		cookieSecretRaw, err := loadToken(o.cookieSecretFile)
		if err != nil {
			logrus.WithError(err).Fatal("Could not read cookie secret file")
		}
		decodedSecret, err := base64.StdEncoding.DecodeString(string(cookieSecretRaw))
		if err != nil {
			logrus.WithError(err).Fatal("Error decoding cookie secret")
		}
		switch {
		case len(decodedSecret) == 32:
			csrfToken = decodedSecret
		case len(decodedSecret) > 32:
			logrus.Warning("Cookie secret should be exactly 32 bytes. Consider truncating the existing cookie to that length")
			hash := sha256.Sum256(decodedSecret)
			csrfToken = hash[:]
		default: // fewer than 32 bytes
			if o.rerunCreatesJob {
				logrus.Fatal("Cookie secret must be exactly 32 bytes")
				return
			}
			logrus.Warning("Cookie secret should be exactly 32 bytes")
		}
	}
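	// A well-formed cookie secret file holds 32 random bytes, base64-encoded.
	// One way to generate such a file (a suggestion, not a requirement of the
	// code above):
	//
	//   openssl rand -base64 32 > cookie-secret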
	// If we allow direct reruns, we must protect against CSRF in all POST
	// requests, using the cookie secret as a token. For more information
	// about CSRF, see
	// https://github.com/kubernetes/test-infra/blob/master/prow/cmd/deck/csrf.md
	empty := prowapi.Refs{}
	if o.rerunCreatesJob && csrfToken == nil && !authCfgGetter(&empty).IsAllowAnyone() {
		logrus.Fatal("Rerun creates job cannot be enabled without CSRF protection, which requires --cookie-secret to be exactly 32 bytes")
		return
	}
	if csrfToken != nil {
		CSRF := csrf.Protect(csrfToken, csrf.Path("/"), csrf.Secure(!o.allowInsecure))
		logrus.WithError(http.ListenAndServe(":8080", CSRF(traceHandler(mux)))).Fatal("ListenAndServe returned.")
		return
	}
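	// gorilla/csrf rejects state-changing requests (POST, PUT, DELETE, ...)
	// that lack a valid token derived from the secret. csrf.Secure(!o.allowInsecure)
	// drops the Secure flag from the CSRF cookie when insecure serving is
	// allowed, so plain-HTTP local runs still work.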
	// Setup done; actually start the server.
	server := &http.Server{Addr: ":8080", Handler: traceHandler(mux)}
	interrupts.ListenAndServe(server, 5*time.Second)
}
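
// Both pjListingClientWrapper (backed by the controller-runtime client) and
// fakePjListingClientWrapper (backed by the static prowjobs.json) satisfy
// jobs.PJListingClient, which is what lets the job-listing endpoints behave
// identically in deployed and local runs.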