in server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java [1100:1399]
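// Validates and applies a volume resize request: checks authorization, hypervisor support and volume state,
// computes the new size and IOPS (from explicit parameters or from a new disk offering), and then either
// updates the database directly (for volumes still in the Allocated state) or orchestrates the resize on the
// backing storage, migrating the volume to another suitable pool first when the current pool lacks space.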
public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationException {
Long newSize = cmd.getSize();
Long newMinIops = cmd.getMinIops();
Long newMaxIops = cmd.getMaxIops();
Integer newHypervisorSnapshotReserve = null;
boolean shrinkOk = cmd.isShrinkOk();
boolean autoMigrateVolume = cmd.getAutoMigrate();
VolumeVO volume = _volsDao.findById(cmd.getEntityId());
if (volume == null) {
throw new InvalidParameterValueException("No such volume");
}
// checking if there are any ongoing snapshots on the volume which is to be resized
List<SnapshotVO> ongoingSnapshots = _snapshotDao.listByStatus(cmd.getId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp);
if (ongoingSnapshots.size() > 0) {
throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on this volume, resize volume is not permitted, please try again later.");
}
/* Does the caller have authority to act on this volume? */
_accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
DiskOfferingVO newDiskOffering = null;
if (cmd.getNewDiskOfferingId() != null) {
newDiskOffering = _diskOfferingDao.findById(cmd.getNewDiskOfferingId());
}
/* Only works for KVM/XenServer/VMware (or "Any") for now, and volumes with 'None' since they're just allocated in DB */
HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId());
if (!SupportedHypervisorsForVolResize.contains(hypervisorType)) {
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support volume resize");
}
if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) {
throw new InvalidParameterValueException("Volume should be in ready or allocated state before attempting a resize. Volume " + volume.getUuid() + " is in state " + volume.getState() + ".");
}
// if we are to use the existing disk offering
if (newDiskOffering == null) {
newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
// if the caller is looking to change the size of the volume
if (newSize != null) {
if (diskOffering.getDiskSizeStrictness()) {
throw new InvalidParameterValueException(String.format("Resize of volume %s is not allowed, since disk size is strictly fixed as per the disk offering", volume.getUuid()));
}
if (diskOffering.isCustomized()) {
validateCustomDiskOfferingSizeRange(newSize);
}
if (isNotPossibleToResize(volume, diskOffering)) {
throw new InvalidParameterValueException(
"Failed to resize Root volume. The service offering of this Volume has been configured with a root disk size; "
+ "on such case a Root Volume can only be resized when changing to another Service Offering with a Root disk size. "
+ "For more details please check out the Official Resizing Volumes documentation.");
}
// convert the requested size from GiB to bytes
newSize = newSize << 30;
} else {
// no parameter provided; just use the original size of the volume
newSize = volume.getSize();
}
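// Min/Max IOPS may only be overridden when the current offering allows customized IOPS (ROOT volumes are
// exempt from this check); when no value is passed, the volume's existing IOPS settings are kept.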
if (newMinIops != null) {
if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) {
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter.");
}
} else {
// no parameter provided; just use the original min IOPS of the volume
newMinIops = volume.getMinIops();
}
if (newMaxIops != null) {
if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) {
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter.");
}
} else {
// no parameter provided; just use the original max IOPS of the volume
newMaxIops = volume.getMaxIops();
}
validateIops(newMinIops, newMaxIops, volume.getPoolType());
} else {
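// A new disk offering was supplied: it must not be removed, must match the current offering's disk size
// strictness and tags, must keep the same encryption setting for ROOT volumes, and must be accessible to
// the volume owner's account in the volume's zone.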
if (newDiskOffering.getRemoved() != null) {
throw new InvalidParameterValueException("Requested disk offering has been removed.");
}
if (diskOffering.getDiskSizeStrictness() != newDiskOffering.getDiskSizeStrictness()) {
throw new InvalidParameterValueException("Disk offering size strictness does not match with new disk offering");
}
if (diskOffering.getDiskSizeStrictness() && (diskOffering.getDiskSize() != newDiskOffering.getDiskSize())) {
throw new InvalidParameterValueException(String.format("Resize volume for %s is not allowed since disk offering's size is fixed", volume.getName()));
}
Long instanceId = volume.getInstanceId();
VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(instanceId);
if (volume.getVolumeType().equals(Volume.Type.ROOT)) {
ServiceOfferingVO serviceOffering = _serviceOfferingDao.findById(vmInstanceVO.getServiceOfferingId());
if (serviceOffering != null && serviceOffering.getDiskOfferingStrictness()) {
throw new InvalidParameterValueException(String.format("Cannot resize ROOT volume [%s] with new disk offering since existing disk offering is strictly assigned to the ROOT volume.", volume.getName()));
}
if (newDiskOffering.getEncrypt() != diskOffering.getEncrypt()) {
throw new InvalidParameterValueException(
String.format("Current disk offering's encryption(%s) does not match target disk offering's encryption(%s)", diskOffering.getEncrypt(), newDiskOffering.getEncrypt())
);
}
}
if (diskOffering.getTags() != null) {
if (!com.cloud.utils.StringUtils.areTagsEqual(diskOffering.getTags(), newDiskOffering.getTags())) {
throw new InvalidParameterValueException("The tags on the new and old disk offerings must match.");
}
} else if (newDiskOffering.getTags() != null) {
throw new InvalidParameterValueException("There are no tags on the current disk offering. The new disk offering needs to have no tags, as well.");
}
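// verify that the volume owner's account may use the new disk offering in the volume's zone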
_configMgr.checkDiskOfferingAccess(_accountMgr.getActiveAccountById(volume.getAccountId()), newDiskOffering, _dcDao.findById(volume.getDataCenterId()));
if (newDiskOffering.getDiskSize() > 0 && !newDiskOffering.isComputeOnly()) {
newSize = newDiskOffering.getDiskSize();
} else if (newDiskOffering.isCustomized()) {
newSize = cmd.getSize();
if (newSize == null) {
throw new InvalidParameterValueException("The new disk offering requires that a size be specified.");
}
validateCustomDiskOfferingSizeRange(newSize);
// convert from GiB to bytes
newSize = newSize << 30;
} else {
if (cmd.getSize() != null) {
throw new InvalidParameterValueException("You cannot pass in a custom disk size to a non-custom disk offering.");
}
newSize = newDiskOffering.getDiskSize();
}
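// additional guard for resizing the ROOT volume of a running VM (see checkIfVolumeIsRootAndVmIsRunning)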
checkIfVolumeIsRootAndVmIsRunning(newSize, volume, vmInstanceVO);
if (newDiskOffering.isCustomizedIops() != null && newDiskOffering.isCustomizedIops()) {
newMinIops = cmd.getMinIops() != null ? cmd.getMinIops() : volume.getMinIops();
newMaxIops = cmd.getMaxIops() != null ? cmd.getMaxIops() : volume.getMaxIops();
validateIops(newMinIops, newMaxIops, volume.getPoolType());
} else {
newMinIops = newDiskOffering.getMinIops();
newMaxIops = newDiskOffering.getMaxIops();
}
// if the hypervisor snapshot reserve value is null, it must remain null (currently KVM is the only hypervisor that uses null here, and null is the only value KVM uses)
newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve() != null ? newDiskOffering.getHypervisorSnapshotReserve() : null;
}
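// Determine whether encryption is required (from the new offering if one was given, otherwise from the
// volume's existing passphrase) and let the primary storage driver adjust the requested size to what is
// actually needed on the pool, which may include extra space when encryption is required.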
long currentSize = volume.getSize();
VolumeInfo volInfo = volFactory.getVolume(volume.getId());
boolean isEncryptionRequired = volume.getPassphraseId() != null;
if (newDiskOffering != null) {
isEncryptionRequired = newDiskOffering.getEncrypt();
}
DataStore dataStore = volInfo.getDataStore();
if (dataStore != null && dataStore.getDriver() instanceof PrimaryDataStoreDriver) {
newSize = ((PrimaryDataStoreDriver) dataStore.getDriver()).getVolumeSizeRequiredOnPool(newSize, null, isEncryptionRequired);
}
validateVolumeResizeWithSize(volume, currentSize, newSize, shrinkOk, diskOffering, newDiskOffering);
// Note: The storage plug-in in question should perform validation on the IOPS to check if a sufficient number of IOPS is available to perform
// the requested change
/* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */
// We need to publish this event to usage_volume table
if (volume.getState() == Volume.State.Allocated) {
logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS.");
volume.setSize(newSize);
volume.setMinIops(newMinIops);
volume.setMaxIops(newMaxIops);
volume.setHypervisorSnapshotReserve(newHypervisorSnapshotReserve);
if (newDiskOffering != null) {
volume.setDiskOfferingId(cmd.getNewDiskOfferingId());
}
_volsDao.update(volume.getId(), volume);
_resourceLimitMgr.updateVolumeResourceCountForDiskOfferingChange(volume.getAccountId(), volume.isDisplayVolume(), currentSize, newSize,
diskOffering, newDiskOffering);
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_RESIZE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid());
return volume;
}
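// The volume already exists on primary storage. If the current pool cannot accommodate the new size and
// autoMigrateVolume is set, look for other suitable pools with enough free space to migrate the volume to.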
Long newDiskOfferingId = newDiskOffering != null ? newDiskOffering.getId() : diskOffering.getId();
boolean volumeMigrateRequired = false;
List<? extends StoragePool> suitableStoragePoolsWithEnoughSpace = null;
StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
if (!storageMgr.storagePoolHasEnoughSpaceForResize(storagePool, currentSize, newSize)) {
if (!autoMigrateVolume) {
throw new CloudRuntimeException(String.format("Failed to resize volume %s since the storage pool does not have enough space to accommodate new size for the volume %s, try with automigrate set to true in order to check in the other suitable pools for the new size and then migrate & resize volume there.", volume.getUuid(), volume.getName()));
}
Pair<List<? extends StoragePool>, List<? extends StoragePool>> poolsPair = managementService.listStoragePoolsForSystemMigrationOfVolume(volume.getId(), newDiskOfferingId, currentSize, newMinIops, newMaxIops, true, false);
List<? extends StoragePool> suitableStoragePools = poolsPair.second();
if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume resize failed for volume ID: %s as no suitable pool(s) found for migrating to support new disk offering or new size", volume.getUuid()));
}
final Long newSizeFinal = newSize;
suitableStoragePoolsWithEnoughSpace = suitableStoragePools.stream().filter(pool -> storageMgr.storagePoolHasEnoughSpaceForResize(pool, 0L, newSizeFinal)).collect(Collectors.toList());
if (CollectionUtils.isEmpty(suitableStoragePoolsWithEnoughSpace)) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume resize failed for volume ID: %s as no suitable pool(s) with enough space found.", volume.getUuid()));
}
Collections.shuffle(suitableStoragePoolsWithEnoughSpace);
volumeMigrateRequired = true;
}
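// a physical resize is only needed when the size or the IOPS limits actually change
// (compareEqualsIncludingNullOrZero treats null and zero as equivalent)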
boolean volumeResizeRequired = false;
if (currentSize != newSize || !compareEqualsIncludingNullOrZero(newMaxIops, volume.getMaxIops()) || !compareEqualsIncludingNullOrZero(newMinIops, volume.getMinIops())) {
volumeResizeRequired = true;
}
if (!volumeMigrateRequired && !volumeResizeRequired && newDiskOffering != null) {
_volsDao.updateDiskOffering(volume.getId(), newDiskOffering.getId());
volume = _volsDao.findById(volume.getId());
updateStorageWithTheNewDiskOffering(volume, newDiskOffering);
return volume;
}
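// migrate the volume to the selected pool first; the resize itself is then carried out on the new pool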
if (volumeMigrateRequired) {
final StoragePool targetPool = suitableStoragePoolsWithEnoughSpace.get(0);
final String volumeUuid = volume.getUuid();
MigrateVolumeCmd migrateVolumeCmd = new MigrateVolumeCmd(volume.getId(), targetPool.getId(), newDiskOfferingId, true);
try {
Volume result = migrateVolume(migrateVolumeCmd);
volume = (result != null) ? _volsDao.findById(result.getId()) : null;
if (volume == null) {
// use the uuid captured before the migration; the volume reference is null at this point
throw new CloudRuntimeException(String.format("Volume resize operation failed for volume ID: %s as migration failed to storage pool %s accommodating new size", volumeUuid, targetPool.getId()));
}
} catch (Exception e) {
throw new CloudRuntimeException(String.format("Volume resize operation failed for volume ID: %s as migration failed to storage pool %s accommodating new size", volumeUuid, targetPool.getId()), e);
}
}
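// If the volume is attached to a VM, serialize the resize with other work jobs on that VM; calls already
// dispatched by the VM work-job dispatcher run the resize directly under a placeholder job to avoid re-entrance.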
UserVmVO userVm = _userVmDao.findById(volume.getInstanceId());
if (userVm != null) {
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = createPlaceHolderWork(userVm.getId());
try {
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve,
newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
} finally {
_workJobDao.expunge(placeHolder.getId());
}
} else {
Outcome<Volume> outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve,
newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
try {
outcome.get();
} catch (InterruptedException e) {
throw new RuntimeException("Operation was interrupted", e);
} catch (ExecutionException e) {
throw new RuntimeException("Execution exception", e);
}
Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
if (jobResult != null) {
if (jobResult instanceof ConcurrentOperationException) {
throw (ConcurrentOperationException) jobResult;
} else if (jobResult instanceof ResourceAllocationException) {
throw (ResourceAllocationException) jobResult;
} else if (jobResult instanceof RuntimeException) {
throw (RuntimeException) jobResult;
} else if (jobResult instanceof Throwable) {
throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
} else if (jobResult instanceof Long) {
return _volsDao.findById((Long) jobResult);
}
}
return volume;
}
}
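// detached volume (or no owning user VM): no VM work-job serialization is needed, resize directly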
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null,
shrinkOk);
}