in internal/onetime/hanadiskrestore/hanadiskrestore.go [545:648]
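// checkPreConditions verifies that the HANA data and log volumes, the attached
// disks, and the source snapshot(s) are in a state that allows a disk restore,
// and derives defaults such as the new disk type when they are not provided.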
func (r *Restorer) checkPreConditions(ctx context.Context, cp *ipb.CloudProperties, checkDataDir getDataPaths, checkLogDir getLogPaths, exec commandlineexecutor.Execute) error {
var err error
if r.baseDataPath, r.logicalDataPath, r.physicalDataPath, err = checkDataDir(ctx, exec); err != nil {
return err
}
if r.baseLogPath, r.logicalLogPath, r.physicalLogPath, err = checkLogDir(ctx, exec); err != nil {
return err
}
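// HANA data and HANA log must reside on separate physical disks; restoring is unsupported otherwise.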
log.CtxLogger(ctx).Infow("Checking preconditions", "Data directory", r.baseDataPath, "Data file system",
r.logicalDataPath, "Data physical volume", r.physicalDataPath, "Log directory", r.baseLogPath,
"Log file system", r.logicalLogPath, "Log physical volume", r.physicalLogPath)
if strings.Contains(r.physicalDataPath, r.physicalLogPath) {
return fmt.Errorf("unsupported: HANA data and HANA log are on the same physical disk - %s", r.physicalDataPath)
}
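// Read the disk mapping for /hana/data when the data disk name or zone is not supplied, or when restoring from a group snapshot.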
if r.DataDiskName == "" || r.DataDiskZone == "" || r.isGroupSnapshot {
if err := r.readDiskMapping(ctx, cp, &instanceinfo.PhysicalPathReader{OS: runtime.GOOS}); err != nil {
return fmt.Errorf("failed to read disks backing /hana/data: %v", err)
}
}
// Verify the disk is attached to the instance.
if !r.isGroupSnapshot {
dev, ok, err := r.gceService.DiskAttachedToInstance(r.Project, r.DataDiskZone, cp.GetInstanceName(), r.DataDiskName)
if err != nil {
return fmt.Errorf("failed to verify if disk %v is attached to the instance", r.DataDiskName)
}
if !ok {
return fmt.Errorf("the disk data-disk-name=%v is not attached to the instance, please pass the current data disk name", r.DataDiskName)
}
r.DataDiskDeviceName = dev
} else {
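// For group snapshot restores, verify that all disks backing /hana/data are attached to the instance.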
if ok, err := r.multiDisksAttachedToInstance(ctx, cp, exec); err != nil {
return fmt.Errorf("failed to verify if disks are attached to the instance: %v", err)
} else if !ok {
return fmt.Errorf("the disks are not attached to the instance, please pass the verify the disks provided are attached to the instance")
}
}
// Verify the snapshot is present.
if !r.isGroupSnapshot {
if r.computeService == nil {
return fmt.Errorf("compute service is nil")
}
snapshot, err := r.computeService.Snapshots.Get(r.Project, r.SourceSnapshot).Do()
if err != nil {
return fmt.Errorf("failed to check if source-snapshot=%v is present: %v", r.SourceSnapshot, err)
}
r.extractLabels(ctx, snapshot)
} else {
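// For group snapshots, find the individual snapshots labelled with the group
// snapshot name and ensure there is one snapshot per disk.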
snapshotList, err := r.gceService.ListSnapshots(ctx, r.Project)
if err != nil {
return fmt.Errorf("failed to list snapshots: %v", err)
}
var numOfSnapshots int
for _, snapshot := range snapshotList.Items {
if snapshot.Labels["goog-sapagent-isg"] == r.GroupSnapshot {
r.extractLabels(ctx, snapshot)
numOfSnapshots++
}
}
if numOfSnapshots != len(r.disks) {
return fmt.Errorf("did not get required number of snapshots for restoration, wanted: %v, got: %v", len(r.disks), numOfSnapshots)
}
}
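// When a new disk prefix is supplied for a group restore, ensure the generated
// disk names do not collide with existing disks.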
if r.isGroupSnapshot && r.NewDiskPrefix != "" {
for diskNum := range r.disks {
newDiskName := fmt.Sprintf("%s-%d", r.NewDiskPrefix, diskNum+1)
disk, err := r.gceService.GetDisk(r.Project, r.DataDiskZone, newDiskName)
if disk != nil && err == nil {
return fmt.Errorf("disk with name %v already exists, please choose a different prefix", newDiskName)
}
}
}
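// Default the new disk type to that of the existing data disk when not specified;
// otherwise expand the provided type into a fully qualified disk type URI.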
if r.NewDiskType == "" {
if !r.isGroupSnapshot {
d, err := r.computeService.Disks.Get(r.Project, r.DataDiskZone, r.DataDiskName).Do()
if err != nil {
return fmt.Errorf("failed to read data disk type: %v", err)
}
r.NewDiskType = d.Type
log.CtxLogger(ctx).Infow("New disk type will be the same as that of the data disk", "diskType", r.NewDiskType)
} else {
disk, err := r.gceService.GetDisk(r.Project, r.DataDiskZone, r.disks[0].disk.GetDiskName())
if err != nil {
return fmt.Errorf("failed to read data disk type: %v", err)
}
r.NewDiskType = disk.Type
log.CtxLogger(ctx).Infow("New disk type will be the same as that of the data disk", "diskType", r.NewDiskType)
}
} else {
r.NewDiskType = fmt.Sprintf("projects/%s/zones/%s/diskTypes/%s", r.Project, r.DataDiskZone, r.NewDiskType)
}
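// Verify that the disks belong to the expected consistency group.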
if err := r.validateDisksBelongToCG(ctx); err != nil {
return err
}
return nil
}