in pkg/csi/cinder/nodeserver.go [121:227]
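// nodePublishEphemeral provisions an ephemeral Cinder volume, attaches it to the current node, and mounts it at the requested target path.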
func nodePublishEphemeral(req *csi.NodePublishVolumeRequest, ns *nodeServer) (*csi.NodePublishVolumeResponse, error) {
var size int
var err error
volID := req.GetVolumeId()
volName := fmt.Sprintf("ephemeral-%s", volID)
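// Tag the created volume with the cluster name.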
properties := map[string]string{"cinder.csi.openstack.org/cluster": ns.Driver.cluster}
capacity, ok := req.GetVolumeContext()["capacity"]
volumeCapability := req.GetVolumeCapability()
volAvailability, err := ns.Metadata.GetAvailabilityZone()
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("retrieving availability zone from MetaData service failed with error %v", err))
}
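// Determine the requested size: default to 1 GiB unless the volume context provides a "capacity" value with a "Gi" suffix.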
size = 1 // default size is 1 GiB
if ok && strings.HasSuffix(capacity, "Gi") {
size, err = strconv.Atoi(strings.TrimSuffix(capacity, "Gi"))
if err != nil {
klog.V(3).Infof("Unable to parse capacity: %v", err)
return nil, status.Error(codes.Internal, fmt.Sprintf("Unable to parse capacity %v", err))
}
}
// Use the volume type from the volume context if provided; otherwise fall back to the default ("")
volumeType, ok := req.GetVolumeContext()["type"]
if !ok {
volumeType = ""
}
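// Provision the ephemeral volume in the node's availability zone, tagged with the cluster name.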
evol, err := ns.Cloud.CreateVolume(volName, size, volumeType, volAvailability, "", "", &properties)
if err != nil {
klog.V(3).Infof("Failed to Create Ephemeral Volume: %v", err)
return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to create Ephemeral Volume %v", err))
}
// Wait for volume status to be Available, before attaching
if evol.Status != openstack.VolumeAvailableStatus {
targetStatus := []string{openstack.VolumeAvailableStatus}
err := ns.Cloud.WaitVolumeTargetStatus(evol.ID, targetStatus)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
klog.V(4).Infof("Ephemeral Volume %s is created", evol.ID)
// attach volume
// for attach volume we need to have information about node.
nodeID, err := ns.Metadata.GetInstanceID()
if err != nil {
msg := "nodePublishEphemeral: Failed to get Instance ID: %v"
klog.V(3).Infof(msg, err)
return nil, status.Errorf(codes.Internal, msg, err)
}
_, err = ns.Cloud.AttachVolume(nodeID, evol.ID)
if err != nil {
msg := "nodePublishEphemeral: attach volume %s failed with error : %v"
klog.V(3).Infof(msg, evol.ID, err)
return nil, status.Errorf(codes.Internal, msg, evol.ID, err)
}
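// Wait for the attachment to complete before resolving the device path.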
err = ns.Cloud.WaitDiskAttached(nodeID, evol.ID)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
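// Resolve the device path under which the attached volume appears on this node.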
m := ns.Mount
devicePath, err := getDevicePath(evol.ID, m)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("Unable to find Device path for volume: %v", err))
}
targetPath := req.GetTargetPath()
// Check whether the target path is already mounted
notMnt, err := m.IsLikelyNotMountPointAttach(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
// Volume Mount
if notMnt {
// default the filesystem type to ext4
fsType := "ext4"
var options []string
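// Honor the filesystem type and mount flags from the volume capability, if set.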
if mnt := volumeCapability.GetMount(); mnt != nil {
if mnt.FsType != "" {
fsType = mnt.FsType
}
mountFlags := mnt.GetMountFlags()
options = append(options, collectMountOptions(fsType, mountFlags)...)
}
// Format the device if necessary and mount it at the target path with the collected mount options
err = m.Mounter().FormatAndMount(devicePath, targetPath, fsType, options)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
return &csi.NodePublishVolumeResponse{}, nil
}