in cmd/coordinator/coordinator.go [245:407]
func main() {
	https.RegisterFlags(flag.CommandLine)
	flag.Parse()

	clog.SetProcessMetadata(processID, processStartTime)
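
	// If no Version was stamped in at build time, fall back to a placeholder
	// in dev mode so the startup log below reports something meaningful.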
	if Version == "" && *mode == "dev" {
		Version = "dev"
	}
	log.Printf("coordinator version %q starting", Version)
	sc := mustCreateSecretClientOnGCE()
	if sc != nil {
		defer sc.Close()
	}

	mustInitMasterKeyCache(sc)

	// TODO(golang.org/issue/38337): remove package level variables where possible.
	// TODO(golang.org/issue/36841): remove after key functions are moved into
	// a shared package.
	pool.SetBuilderMasterKey(masterKey())
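
	// Initialize the GCE buildlet pool. If GCE initialization fails, VM-based
	// builds are disabled and the mode defaults to "dev"; on success the mode
	// defaults to "prod".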
	err := pool.InitGCE(sc, vmDeleteTimeout, testFiles, &basePinErr, isGCERemoteBuildlet, *buildEnvName, *mode)
	if err != nil {
		if *mode == "" {
			*mode = "dev"
		}
		log.Printf("VM support disabled due to error initializing GCE: %v", err)
	} else {
		if *mode == "" {
			*mode = "prod"
		}
	}
	gce := pool.NewGCEConfiguration()
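
	// Initialize the Kubernetes buildlet pool. An initialization error is
	// recorded (see pool.KubeErr below) rather than being fatal.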
	// TODO(evanbrown): disable kubePool if init fails.
	err = pool.InitKube(monitorGitMirror)
	if err != nil {
		pool.KubeSetErr(err)
		log.Printf("Kube support disabled due to error initializing Kubernetes: %v", err)
	}
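
	// The EC2 buildlet pool is created only in production, or in dev mode
	// when *devEnableEC2 is set; its cleanup is deferred for the lifetime
	// of the process.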
if *mode == "prod" || (*mode == "dev" && *devEnableEC2) {
// TODO(golang.org/issues/38337) the coordinator will use a package scoped pool
// until the coordinator is refactored to not require them.
ec2Pool := mustCreateEC2BuildletPool(sc)
defer ec2Pool.Close()
}
if *mode == "dev" {
// Replace linux-amd64 with a config using a -localdev reverse
// buildlet so it is possible to run local builds by starting a
// local reverse buildlet.
dashboard.Builders["linux-amd64"] = &dashboard.BuildConfig{
Name: "linux-amd64",
HostType: "host-linux-amd64-localdev",
}
}
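
	// A sketch of starting such a local reverse buildlet (the flag names here
	// assume cmd/buildlet's -coordinator and -reverse-type flags; verify them
	// against your checkout):
	//
	//	go run golang.org/x/build/cmd/buildlet -coordinator=localhost -reverse-type=host-linux-amd64-localdev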

	go clog.CoordinatorProcess().UpdateInstanceRecord()

	switch *mode {
	case "dev", "prod":
		log.Printf("Running in %s mode", *mode)
	default:
		log.Fatalf("Unknown mode: %q", *mode)
	}
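
	// Register health checkers and set up metrics export. The GKE resource
	// lookup is expected to fail when not running on GCE, so that error is
	// logged only when actually on GCE.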
	addHealthCheckers(context.Background(), sc)

	gr, err := metrics.GKEResource("coordinator-deployment")
	if err != nil && metadata.OnGCE() {
		log.Println("metrics.GKEResource:", err)
	}

	mux := http.NewServeMux()
	if ms, err := metrics.NewService(gr, views); err != nil {
		log.Println("failed to initialize metrics:", err)
	} else {
		mux.Handle("/metrics", ms)
		defer ms.Stop()
	}
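
	// Create the maintner API client; it backs both dashboard handlers
	// constructed below.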
	cc, err := grpc4.NewClient(http.DefaultClient, "https://maintner.golang.org")
	if err != nil {
		log.Fatal(err)
	}
	maintnerClient = apipb.NewMaintnerServiceClient(cc)

	if err := loadStatic(); err != nil {
		log.Printf("Failed to load static resources: %v", err)
	}
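
	// In production on GCE (with no explicit build environment override),
	// require IAP authentication on all gRPC methods. The Service ID lookup
	// verifies that the IAP-protected backend service is configured before
	// serving.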
	var opts []grpc.ServerOption
	if *buildEnvName == "" && *mode != "dev" && metadata.OnGCE() {
		projectID, err := metadata.ProjectID()
		if err != nil {
			log.Fatalf("metadata.ProjectID() = %v", err)
		}
		env := buildenv.ByProjectID(projectID)
		var coordinatorBackend, serviceID = "coordinator-internal-iap", ""
		if serviceID = env.IAPServiceID(coordinatorBackend); serviceID == "" {
			log.Fatalf("unable to retrieve Service ID for backend service=%q", coordinatorBackend)
		}
		opts = append(opts, grpc.UnaryInterceptor(access.RequireIAPAuthUnaryInterceptor(access.IAPSkipAudienceValidation)))
		opts = append(opts, grpc.StreamInterceptor(access.RequireIAPAuthStreamInterceptor(access.IAPSkipAudienceValidation)))
	}

	// grpcServer is a shared gRPC server. It is shared across subsystems, as
	// it needs to be used in places that aren't factored otherwise.
	grpcServer := grpc.NewServer(opts...)

	dashV1 := legacydash.Handler(gce.GoDSClient(), maintnerClient, string(masterKey()), grpcServer)
	dashV2 := &builddash.Handler{Datastore: gce.GoDSClient(), Maintner: maintnerClient}
	gs := &gRPCServer{dashboardURL: "https://build.golang.org"}
	gomoteServer := gomote.New(remote.NewSessionPool(context.Background()), sched)
	protos.RegisterCoordinatorServer(grpcServer, gs)
	gomoteprotos.RegisterGomoteServiceServer(grpcServer, gomoteServer)
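
	// HTTP routes. A single mux serves the farmer.golang.org status site at
	// "/", the build.golang.org dashboard via host-prefixed patterns, and the
	// buildlet, reverse-buildlet, and status endpoints.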
mux.HandleFunc("/", grpcHandlerFunc(grpcServer, handleStatus)) // Serve a status page at farmer.golang.org.
mux.Handle("build.golang.org/", dashV1) // Serve a build dashboard at build.golang.org.
mux.Handle("build-staging.golang.org/", dashV1)
mux.HandleFunc("/builders", handleBuilders)
mux.HandleFunc("/temporarylogs", handleLogs)
mux.HandleFunc("/reverse", pool.HandleReverse)
mux.Handle("/revdial", revdial.ConnHandler())
mux.HandleFunc("/style.css", handleStyleCSS)
mux.HandleFunc("/try", serveTryStatus(false))
mux.HandleFunc("/try.json", serveTryStatus(true))
mux.HandleFunc("/status/reverse.json", pool.ReversePool().ServeReverseStatusJSON)
mux.HandleFunc("/status/post-submit-active.json", handlePostSubmitActiveJSON)
mux.Handle("/dashboard", dashV2)
mux.Handle("/buildlet/create", requireBuildletProxyAuth(http.HandlerFunc(handleBuildletCreate)))
mux.Handle("/buildlet/list", requireBuildletProxyAuth(http.HandlerFunc(handleBuildletList)))
if *mode == "dev" {
// TODO(crawshaw): do more in dev mode
gce.BuildletPool().SetEnabled(*devEnableGCE)
go findWorkLoop()
} else {
go gce.BuildletPool().CleanUpOldVMs()
if pool.KubeErr() == nil {
go pool.KubePool().CleanUpOldPodsLoop(context.Background())
}
if gce.InStaging() {
dashboard.Builders = stagingClusterBuilders()
}
go listenAndServeInternalModuleProxy()
go findWorkLoop()
go findTryWorkLoop()
go reportReverseCountMetrics()
// TODO(cmang): gccgo will need its own findWorkLoop
}

	go listenAndServeSSH(sc) // ssh proxy to remote buildlets; remote.go

	h := httpRouter(mux)
	if *mode == "dev" {
		// Use hostPathHandler in local development mode (only) to make it
		// convenient to test the multiple domains the coordinator serves.
		h = hostPathHandler(h)
	}
	log.Fatalln(https.ListenAndServe(context.Background(), h))
}