func (c *Cloud) AttachDisk()

in pkg/providers/v1/aws.go [2613:2707]


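// AttachDisk attaches the EBS volume identified by diskName to the EC2
// instance backing nodeName, and returns the device path at which the
// volume is exposed inside the instance (e.g. /dev/xvdba).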
func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return "", err
	}

	awsInstance, info, err := c.getFullInstance(nodeName)
	if err != nil {
		return "", fmt.Errorf("error finding instance %s: %q", nodeName, err)
	}

	// mountDevice will hold the device where we should try to attach the disk
	var mountDevice mountDevice
	// alreadyAttached is true if we have already called AttachVolume on this disk
	var alreadyAttached bool

	// attachEnded is set to true if the attach operation completed
	// (successfully or not), and is thus no longer in progress
	attachEnded := false
	defer func() {
		if attachEnded {
			if !c.endAttaching(awsInstance, disk.awsID, mountDevice) {
				klog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID)
			}
		}
	}()

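	// Reserve a device name (e.g. "ba") on this instance for the volume.
	// If the volume is already attached (or attaching) to this instance,
	// getMountDevice returns the existing device and sets alreadyAttached.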
	mountDevice, alreadyAttached, err = c.getMountDevice(awsInstance, info, disk.awsID, true)
	if err != nil {
		return "", err
	}

	// Inside the instance, the mountpoint always looks like /dev/xvdX (?)
	hostDevice := "/dev/xvd" + string(mountDevice)
	// We are using xvd names (so we are HVM only)
	// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
	ec2Device := "/dev/xvd" + string(mountDevice)
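	// hostDevice and ec2Device are identical here; they are kept separate
	// because the name the guest kernel exposes and the name passed to the
	// EC2 API could in principle diverge.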

	if !alreadyAttached {
		available, err := c.checkIfAvailable(disk, "attaching", awsInstance.awsID)
		if err != nil {
			klog.Error(err)
		}

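		// checkIfAvailable returns a non-nil error whenever the volume is
		// not in the "available" state, so err is what we surface below.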
		if !available {
			attachEnded = true
			return "", err
		}
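		// Issue the EC2 AttachVolume call, requesting the device name we
		// just reserved.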
		request := &ec2.AttachVolumeInput{
			Device:     aws.String(ec2Device),
			InstanceId: aws.String(awsInstance.awsID),
			VolumeId:   disk.awsID.awsString(),
		}

		attachResponse, err := c.ec2.AttachVolume(request)
		if err != nil {
			attachEnded = true
			// TODO: Check if the volume was concurrently attached?
			return "", wrapAttachError(err, disk, awsInstance.awsID)
		}
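		// Deprioritize the device name we just used so the allocator
		// prefers fresh names next time, reducing collisions with devices
		// that are still detaching.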
		if da, ok := c.deviceAllocators[awsInstance.nodeName]; ok {
			da.Deprioritize(mountDevice)
		}
		klog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse)
	}

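	// Block until EC2 reports the volume as "attached" on this instance,
	// or until the wait times out.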
	attachment, err := disk.waitForAttachmentStatus("attached", awsInstance.awsID, ec2Device, alreadyAttached)
	if err != nil {
		if err == wait.ErrWaitTimeout {
			c.applyUnSchedulableTaint(nodeName, "Volume stuck in attaching state - node needs reboot to fix impaired state.")
		}
		return "", err
	}

	// The attach operation has finished
	attachEnded = true

	// Double check the attachment to be 100% sure we attached the correct volume at the correct mountpoint
	// It could happen otherwise that we see the volume attached from a previous/separate AttachVolume call,
	// which could theoretically be against a different device (or even instance).
	if attachment == nil {
		// Impossible?
		return "", fmt.Errorf("unexpected state: attachment nil after attached %q to %q", diskName, nodeName)
	}
	if ec2Device != aws.StringValue(attachment.Device) {
		// Already checked in waitForAttachmentStatus(), but just to be sure...
		return "", fmt.Errorf("disk attachment of %q to %q failed: requested device %q but found %q", diskName, nodeName, ec2Device, aws.StringValue(attachment.Device))
	}
	if awsInstance.awsID != aws.StringValue(attachment.InstanceId) {
		return "", fmt.Errorf("disk attachment of %q to %q failed: requested instance %q but found %q", diskName, nodeName, awsInstance.awsID, aws.StringValue(attachment.InstanceId))
	}

	return hostDevice, nil
}
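
A minimal caller sketch, not from the source file: it assumes an initialized *Cloud value named cloud in the same package, and the volume ID and node name below are placeholders.

// Hypothetical usage sketch. "cloud" is an assumed, fully initialized
// *Cloud; the volume and node below are placeholders, not real resources.
volumeID := KubernetesVolumeID("aws://us-east-1a/vol-0123456789abcdef0")
node := types.NodeName("ip-10-0-0-1.ec2.internal")

devicePath, err := cloud.AttachDisk(volumeID, node)
if err != nil {
	klog.Errorf("attaching %s to %s failed: %v", volumeID, node, err)
} else {
	klog.V(2).Infof("volume %s attached at %s", volumeID, devicePath)
}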