func StartNewVM()

in buildlet/gce.go [39:278]

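StartNewVM boots a new GCE VM for the given host type, fills unset options with defaults, installs metadata that the VM's buildlet reads at boot, waits for the create operation to finish, and returns a Client connected to the buildlet.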

func StartNewVM(creds *google.Credentials, buildEnv *buildenv.Environment, instName, hostType string, opts VMOpts) (Client, error) {
	ctx := context.TODO()
	computeService, err := compute.New(oauth2.NewClient(ctx, creds.TokenSource))
	if err != nil {
		return nil, fmt.Errorf("creating compute service: %v", err)
	}

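	// Fill in defaults for any options the caller left unset.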
	if opts.Description == "" {
		opts.Description = fmt.Sprintf("Go Builder for %s", hostType)
	}
	if opts.ProjectID == "" {
		opts.ProjectID = buildEnv.ProjectName
	}
	if opts.Zone == "" {
		opts.Zone = buildEnv.RandomVMZone()
	}
	zone := opts.Zone
	if opts.DeleteIn == 0 {
		opts.DeleteIn = 30 * time.Minute
	}

	hconf, ok := dashboard.Hosts[hostType]
	if !ok {
		return nil, fmt.Errorf("invalid host type %q", hostType)
	}
	if !hconf.IsVM() && !hconf.IsContainer() {
		return nil, fmt.Errorf("host %q is type %q; want either a VM or container type", hostType, hconf.PoolName())
	}

	projectID := opts.ProjectID
	if projectID == "" {
		return nil, errors.New("buildlet: missing required ProjectID option")
	}

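	// Build fully-qualified GCE resource URLs for the machine type and boot disk type.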
	prefix := "https://www.googleapis.com/compute/v1/projects/" + projectID
	machType := prefix + "/zones/" + zone + "/machineTypes/" + hconf.MachineType()
	diskType := "https://www.googleapis.com/compute/v1/projects/" + projectID + "/zones/" + zone + "/diskTypes/pd-ssd"
	if hconf.RegularDisk {
		diskType = "" // empty means the default disk type: a spinning disk
	}

	srcImage := "https://www.googleapis.com/compute/v1/projects/" + projectID + "/global/images/" + hconf.VMImage
	minCPU := hconf.MinCPUPlatform
	if hconf.IsContainer() {
		if hconf.NestedVirt {
			minCPU = "Intel Cascade Lake" // N2 VMs (which support nested virtualization) are either Ice Lake or Cascade Lake.
		}
		if vm := hconf.ContainerVMImage(); vm != "" {
			srcImage = "https://www.googleapis.com/compute/v1/projects/" + projectID + "/global/images/" + vm
		} else {
			var err error
			srcImage, err = cosImage(ctx, computeService)
			if err != nil {
				return nil, fmt.Errorf("error finding Container-Optimized OS image: %v", err)
			}
		}
	}

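	// Assemble the instance: a boot disk initialized from srcImage, the
	// restricted https-server tag, and a NIC on the default-vpc network.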
	instance := &compute.Instance{
		Name:           instName,
		Description:    opts.Description,
		MachineType:    machType,
		MinCpuPlatform: minCPU,
		Disks: []*compute.AttachedDisk{
			{
				AutoDelete: true,
				Boot:       true,
				Type:       "PERSISTENT",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					DiskName:    instName,
					SourceImage: srcImage,
					DiskType:    diskType,
				},
			},
		},
		Tags: &compute.Tags{
			// Warning: do NOT list "http-server" or "allow-ssh" (our
			// project's custom tag to allow ssh access) here; the
			// buildlet provides full remote code execution.
			// The buildlet's HTTPS server requires authentication, though.
			Items: []string{"https-server"},
		},
		Metadata: &compute.Metadata{},
		NetworkInterfaces: []*compute.NetworkInterface{{
			Network: prefix + "/global/networks/default-vpc",
		}},

		// Prior to git rev 1b1e086fd, we used preemptible
		// instances, as we were helping test the feature. It was
		// removed after git rev a23395d because we hadn't been
		// using it for some time. Our VMs are so short-lived that
		// the feature doesn't really help anyway. But if we ever
		// find we want it again, this comment is here to point to
		// code that might be useful to partially resurrect.
		Scheduling: &compute.Scheduling{Preemptible: false},
	}

	// Container builders use the COS image, which defaults to logging to Cloud Logging.
	// This service account is granted permission to write those logs.
	if hconf.IsContainer() && buildEnv.COSServiceAccount != "" {
		instance.ServiceAccounts = []*compute.ServiceAccount{
			{
				Email:  buildEnv.COSServiceAccount,
				Scopes: []string{compute.CloudPlatformScope},
			},
		}
	}

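	// addMeta records a key/value pair in the instance's GCE metadata,
	// which the VM can read back at boot via the metadata server.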
	addMeta := func(key, value string) {
		instance.Metadata.Items = append(instance.Metadata.Items, &compute.MetadataItems{
			Key:   key,
			Value: &value,
		})
	}
	// The buildlet-binary-url is the URL of the buildlet binary
	// which the VMs are configured to download at boot and run.
	// This lets us update the buildlet more easily than
	// rebuilding the whole VM image.
	addMeta("buildlet-binary-url", hconf.BuildletBinaryURL(buildenv.ByProjectID(opts.ProjectID)))
	addMeta("buildlet-host-type", hostType)
	if !opts.TLS.IsZero() {
		addMeta("tls-cert", opts.TLS.CertPEM)
		addMeta("tls-key", opts.TLS.KeyPEM)
		addMeta("password", opts.TLS.Password())
	}
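	// For container hosts, declare the container for COS to run: the
	// buildlet image, privileged, with /workdir mounted on a tmpfs.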
	if hconf.IsContainer() {
		addMeta("gce-container-declaration", fmt.Sprintf(`spec:
  containers:
    - name: buildlet
      image: 'gcr.io/%s/%s'
      volumeMounts:
        - name: tmpfs-0
          mountPath: /workdir
      securityContext:
        privileged: true
      stdin: false
      tty: false
  restartPolicy: Always
  volumes:
    - name: tmpfs-0
      emptyDir:
        medium: Memory
`, opts.ProjectID, hconf.ContainerImage))
	}

	if opts.DeleteIn > 0 {
		// In case the VM gets away from us (generally: if the
		// coordinator dies while a build is running), we set
		// this attribute to record when the VM should be
		// killed, so a restarted coordinator can kill it later.
		// The cleanUpOldVMs goroutine loop handles that killing.
		addMeta("delete-at", fmt.Sprint(time.Now().Add(opts.DeleteIn).Unix()))
	}

	for k, v := range opts.Meta {
		addMeta(k, v)
	}

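	// Issue the create request; apiGate paces our GCE API calls.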
	apiGate()
	op, err := computeService.Instances.Insert(projectID, zone, instance).Do()
	if err != nil {
		return nil, fmt.Errorf("failed to create instance: %v", err)
	}
	condRun(opts.OnInstanceRequested)
	createOp := op.Name

	// Wait for instance create operation to succeed.
OpLoop:
	for {
		time.Sleep(2 * time.Second)
		apiGate()
		op, err := computeService.ZoneOperations.Get(projectID, zone, createOp).Do()
		if err != nil {
			return nil, fmt.Errorf("failed to get op %s: %v", createOp, err)
		}
		switch op.Status {
		case "PENDING", "RUNNING":
			continue
		case "DONE":
			if op.Error != nil {
				for _, operr := range op.Error.Errors {
					log.Printf("failed to create instance %s in zone %s: %v", instName, zone, operr.Code)
					// TODO: catch Code=="QUOTA_EXCEEDED" and "Message" and return
					// a known error value/type.
					return nil, fmt.Errorf("error creating instance: %+v", operr)
				}
				return nil, errors.New("failed to start")
			}
			break OpLoop
		default:
			return nil, fmt.Errorf("unknown create status %q: %+v", op.Status, op)
		}
	}
	condRun(opts.OnInstanceCreated)

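	// The instance exists now; fetch its details to learn the IP addresses it was assigned.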
	apiGate()
	inst, err := computeService.Instances.Get(projectID, zone, instName).Do()
	if err != nil {
		return nil, fmt.Errorf("error getting instance %s details after creation: %v", instName, err)
	}

	// Find the instance's internal and/or external IP addresses.
	intIP, extIP := instanceIPs(inst)

	// Wait for it to boot and its buildlet to come up.
	var buildletURL string
	var ipPort string
	if !opts.TLS.IsZero() {
		if extIP == "" {
			return nil, errors.New("didn't find its external IP address")
		}
		buildletURL = "https://" + extIP
		ipPort = extIP + ":443"
	} else {
		if intIP == "" {
			return nil, errors.New("didn't find its internal IP address")
		}
		buildletURL = "http://" + intIP
		ipPort = intIP + ":80"
	}
	if opts.OnGotInstanceInfo != nil {
		opts.OnGotInstanceInfo(inst)
	}
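	// If requested, reach the buildlet through an IAP tunnel on a local
	// forwarded port instead of connecting to its IP directly; the
	// tunnel's close func is attached to the client below.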
	var closeFuncs []func()
	if opts.UseIAPTunnel {
		localPort, closeFunc, err := createIAPTunnel(ctx, inst)
		if err != nil {
			return nil, fmt.Errorf("creating IAP tunnel: %v", err)
		}
		buildletURL = "http://localhost:" + localPort
		ipPort = "127.0.0.1:" + localPort
		closeFuncs = append(closeFuncs, closeFunc)
	}
	client, err := buildletClient(ctx, buildletURL, ipPort, &opts)
	if err != nil {
		return nil, err
	}
	for _, cf := range closeFuncs {
		client.AddCloseFunc(cf)
	}
	return client, nil
}
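
For reference, a call site might look like the following sketch. The StartNewVM signature and the VMOpts fields used here (TLS, DeleteIn, OnInstanceRequested) come from the function above; buildenv.Production, Environment.Credentials, buildlet.NewKeyPair, and the instance and host type names are assumptions drawn from the surrounding x/build packages and may differ from the actual API.

ctx := context.Background()
env := buildenv.Production // assumed: the production build environment
creds, err := env.Credentials(ctx) // assumed helper returning *google.Credentials
if err != nil {
	log.Fatal(err)
}
kp, err := buildlet.NewKeyPair() // assumed helper; generates the TLS cert/key the buildlet serves
if err != nil {
	log.Fatal(err)
}
client, err := buildlet.StartNewVM(creds, env, "buildlet-example-1", "host-linux-amd64", buildlet.VMOpts{
	TLS:                 kp,
	DeleteIn:            45 * time.Minute, // cleanUpOldVMs reaps the VM if we crash
	OnInstanceRequested: func() { log.Printf("instance requested") },
})
if err != nil {
	log.Fatal(err)
}
defer client.Close()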