func InitGCE()

in internal/coordinator/pool/gce.go [94:212]


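// InitGCE initializes the package-level GCE buildlet pool state: it resolves
// the build environment, creates the storage, datastore, and error-reporting
// clients, obtains GCP credentials, checks TryBot dependencies, and starts
// the background polling loops when running against production or staging.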
func InitGCE(sc *secret.Client, vmDeleteTimeout time.Duration, tFiles map[string]string, basePin *atomic.Value, fn IsRemoteBuildletFunc, buildEnvName, mode string) error {
	gceMode = mode
	deleteTimeout = vmDeleteTimeout
	testFiles = tFiles
	basePinErr = basePin
	isGCERemoteBuildlet = fn

	ctx := context.Background()
	var err error

	// If the coordinator is running on a GCE instance and a
	// buildEnv was not specified with the env flag, set the
	// buildEnvName to the project ID.
	if buildEnvName == "" {
		if mode == "dev" {
			buildEnvName = "dev"
		} else if metadata.OnGCE() {
			buildEnvName, err = metadata.ProjectID()
			if err != nil {
				log.Fatalf("metadata.ProjectID: %v", err)
			}
		}
	}

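	// Resolve the build environment configuration for the chosen project.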
	buildEnv = buildenv.ByProjectID(buildEnvName)
	inStaging = buildEnv == buildenv.Staging

	// If running on GCE, override the zone and static IP, and check service account permissions.
	if metadata.OnGCE() {
		gkeNodeHostname, err = metadata.Get("instance/hostname")
		if err != nil {
			return fmt.Errorf("failed to get current instance hostname: %v", err)
		}

		if buildEnv.KubeBuild.Zone == "" {
			projectZone, err := metadata.Get("instance/zone")
			if err != nil || projectZone == "" {
				return fmt.Errorf("failed to get current GCE zone: %v", err)
			}
			// Convert the zone from "projects/1234/zones/us-central1-a" to "us-central1-a".
			projectZone = path.Base(projectZone)
			buildEnv.KubeBuild.Zone = projectZone
		}

		if buildEnv.StaticIP == "" {
			buildEnv.StaticIP, err = metadata.ExternalIP()
			if err != nil {
				return fmt.Errorf("ExternalIP: %v", err)
			}
		}

		if !hasComputeScope() {
			return errors.New("coordinator is not running with access to read and write Compute resources. VM support disabled")
		}
	}

	cfgDump, _ := json.MarshalIndent(buildEnv, "", "  ")
	log.Printf("Loaded configuration %q for project %q:\n%s", buildEnvName, buildEnv.ProjectName, cfgDump)

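	// Create the GCS and Datastore clients. The GCS client is skipped in dev
	// mode, and Datastore client errors are fatal only outside dev mode.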
	if mode != "dev" {
		storageClient, err = storage.NewClient(ctx)
		if err != nil {
			log.Fatalf("storage.NewClient: %v", err)
		}
	}

	dsClient, err = datastore.NewClient(ctx, buildEnv.ProjectName)
	if err != nil {
		if mode == "dev" {
			log.Printf("Error creating datastore client for %q: %v", buildEnv.ProjectName, err)
		} else {
			log.Fatalf("Error creating datastore client for %q: %v", buildEnv.ProjectName, err)
		}
	}
	goDSClient, err = datastore.NewClient(ctx, buildEnv.GoProjectName)
	if err != nil {
		if mode == "dev" {
			log.Printf("Error creating datastore client for %q: %v", buildEnv.GoProjectName, err)
		} else {
			log.Fatalf("Error creating datastore client for %q: %v", buildEnv.GoProjectName, err)
		}
	}

	// don't send dev errors to Stackdriver.
	if mode != "dev" {
		errorsClient, err = errorreporting.NewClient(ctx, buildEnv.ProjectName, errorreporting.Config{
			ServiceName: "coordinator",
		})
		if err != nil {
			// don't exit, we still want to run coordinator
			log.Printf("Error creating errors client: %v", err)
		}
	}

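	// Obtain GCP credentials. In dev mode a failure here skips the rest of
	// the GCE setup instead of aborting the coordinator.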
	gcpCreds, err = buildEnv.Credentials(ctx)
	if err != nil {
		if mode == "dev" {
			// don't try to do anything else with GCE, as it will likely fail
			return nil
		}
		log.Fatalf("failed to get a token source: %v", err)
	}
	oAuthHTTPClient = oauth2.NewClient(ctx, gcpCreds.TokenSource)
	computeService, _ = compute.New(oAuthHTTPClient)
	errTryDeps = checkTryBuildDeps(ctx, sc)
	if errTryDeps != nil {
		log.Printf("TryBot builders disabled due to error: %v", errTryDeps)
	} else {
		log.Printf("TryBot builders enabled.")
	}

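	// Start the build-stats sync, quota polling, and basepin disk creation
	// loops only when running on GCE against production or staging.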
	if mode != "dev" && metadata.OnGCE() && (buildEnv == buildenv.Production || buildEnv == buildenv.Staging) {
		go syncBuildStatsLoop(buildEnv)
		go gcePool.pollQuotaLoop()
		go createBasepinDisks(ctx)
	}

	return nil
}
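
A minimal sketch of how a caller might wire up InitGCE, assuming it is compiled
inside the golang.org/x/build module (the pool package is internal). The flag
values, the nil *secret.Client, and the isRemoteBuildlet stub are illustrative
assumptions, not taken from the coordinator's real main package.

package main

import (
	"log"
	"sync/atomic"
	"time"

	"golang.org/x/build/internal/coordinator/pool"
)

func main() {
	// Shared value that InitGCE stores as basePinErr (see the listing above).
	var basePin atomic.Value

	// Stub that reports no buildlet is currently held by a remote client.
	// Assumes IsRemoteBuildletFunc has the underlying type func(string) bool.
	isRemoteBuildlet := func(instName string) bool { return false }

	err := pool.InitGCE(
		nil,            // *secret.Client: not needed for this dev-mode sketch
		45*time.Minute, // vmDeleteTimeout: assumed value
		nil,            // test files map: only used by tests
		&basePin,
		isRemoteBuildlet,
		"",    // buildEnvName: empty lets InitGCE derive it ("dev" here)
		"dev", // mode
	)
	if err != nil {
		log.Fatalf("pool.InitGCE: %v", err)
	}
}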