in cmd/sync.go [120:378]
// cook validates and converts the raw, string-typed command-line arguments into a
// strongly-typed cookedSyncCmdArgs. It also performs side effects needed before a
// sync job can start: opening the front-end scanning logger, resolving the
// source/destination location combination, and emitting user-facing warnings.
// Any invalid flag value or unsupported source/destination pairing returns an error.
func (raw *rawSyncCmdArgs) cook() (cookedSyncCmdArgs, error) {
	cooked := cookedSyncCmdArgs{}

	// set up the front end scanning logger
	azcopyScanningLogger = common.NewJobLogger(azcopyCurrentJobID, azcopyLogVerbosity, azcopyLogPathFolder, "-scanning")
	azcopyScanningLogger.OpenLog()
	glcm.RegisterCloseFunc(func() {
		azcopyScanningLogger.CloseLog()
	})

	// if no logging, set this empty so that we don't display the log location
	if azcopyLogVerbosity == common.LogNone {
		azcopyLogPathFolder = ""
	}

	// this if statement ladder remains instead of being separated to help determine valid combinations for sync
	// consider making a map of valid source/dest combos and consolidating this to generic source/dest setups, akin to the lower if statement
	// TODO: if expand the set of source/dest combos supported by sync, update this method the declarative test framework:
	var err error
	if err = cooked.trailingDot.Parse(raw.trailingDot); err != nil {
		return cooked, err
	}

	cooked.fromTo, err = ValidateFromTo(raw.src, raw.dst, raw.fromTo)
	if err != nil {
		return cooked, err
	}

	// display a warning message to console and job log file if there is a sync operation being performed from local to file share.
	// Reference : https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-files#synchronize-files
	if cooked.fromTo == common.EFromTo.LocalFile() {
		glcm.Warn(LocalToFileShareWarnMsg)
		if jobsAdmin.JobsAdmin != nil {
			jobsAdmin.JobsAdmin.LogToJobLog(LocalToFileShareWarnMsg, common.LogWarning)
		}
		if raw.dryrun {
			glcm.Dryrun(func(of common.OutputFormat) string {
				if of == common.EOutputFormat.Json() {
					var out struct {
						Warn string `json:"warn"`
					}
					out.Warn = LocalToFileShareWarnMsg
					buf, _ := json.Marshal(out)
					return string(buf)
				}
				return fmt.Sprintf("DRYRUN: warn %s", LocalToFileShareWarnMsg)
			})
		}
	}

	// Split the raw resource strings according to which side(s) of the transfer are remote.
	switch cooked.fromTo {
	case common.EFromTo.Unknown():
		return cooked, fmt.Errorf("Unable to infer the source '%s' / destination '%s'. ", raw.src, raw.dst)
	case common.EFromTo.LocalBlob(), common.EFromTo.LocalFile(), common.EFromTo.LocalBlobFS():
		cooked.destination, err = SplitResourceString(raw.dst, cooked.fromTo.To())
		common.PanicIfErr(err)
	case common.EFromTo.BlobLocal(), common.EFromTo.FileLocal(), common.EFromTo.BlobFSLocal():
		cooked.source, err = SplitResourceString(raw.src, cooked.fromTo.From())
		common.PanicIfErr(err)
	case common.EFromTo.BlobBlob(), common.EFromTo.FileFile(), common.EFromTo.BlobFile(), common.EFromTo.FileBlob(), common.EFromTo.BlobFSBlobFS(), common.EFromTo.BlobFSBlob(), common.EFromTo.BlobFSFile(), common.EFromTo.BlobBlobFS(), common.EFromTo.FileBlobFS():
		cooked.destination, err = SplitResourceString(raw.dst, cooked.fromTo.To())
		common.PanicIfErr(err)
		cooked.source, err = SplitResourceString(raw.src, cooked.fromTo.From())
		common.PanicIfErr(err)
	default:
		return cooked, fmt.Errorf("source '%s' / destination '%s' combination '%s' not supported for sync command ", raw.src, raw.dst, cooked.fromTo)
	}

	// Do this check separately so we don't end up with a bunch of code duplication when new src/dstn are added
	if cooked.fromTo.From() == common.ELocation.Local() {
		cooked.source = common.ResourceString{Value: common.ToExtendedPath(cleanLocalPath(raw.src))}
	} else if cooked.fromTo.To() == common.ELocation.Local() {
		cooked.destination = common.ResourceString{Value: common.ToExtendedPath(cleanLocalPath(raw.dst))}
	}

	// we do not support service level sync yet
	if cooked.fromTo.From().IsRemote() {
		if err = validateURLIsNotServiceLevel(cooked.source.Value, cooked.fromTo.From()); err != nil {
			return cooked, err
		}
	}

	// we do not support service level sync yet
	if cooked.fromTo.To().IsRemote() {
		if err = validateURLIsNotServiceLevel(cooked.destination.Value, cooked.fromTo.To()); err != nil {
			return cooked, err
		}
	}

	// use the globally generated JobID
	cooked.jobID = azcopyCurrentJobID

	cooked.blockSize, err = blockSizeInBytes(raw.blockSizeMB)
	if err != nil {
		return cooked, err
	}
	cooked.putBlobSize, err = blockSizeInBytes(raw.putBlobSizeMB)
	if err != nil {
		return cooked, err
	}

	if err = cooked.symlinkHandling.Determine(raw.followSymlinks, raw.preserveSymlinks); err != nil {
		return cooked, err
	}
	cooked.recursive = raw.recursive
	cooked.forceIfReadOnly = raw.forceIfReadOnly
	if err = validateForceIfReadOnly(cooked.forceIfReadOnly, cooked.fromTo); err != nil {
		return cooked, err
	}

	cooked.backupMode = raw.backupMode
	if err = validateBackupMode(cooked.backupMode, cooked.fromTo); err != nil {
		return cooked, err
	}

	// determine whether we should prompt the user to delete extra files
	if err = cooked.deleteDestination.Parse(raw.deleteDestination); err != nil {
		return cooked, err
	}

	// warn on legacy filters
	if raw.legacyInclude != "" || raw.legacyExclude != "" {
		return cooked, fmt.Errorf("the include and exclude parameters have been replaced by include-pattern and exclude-pattern. They work on filenames only (not paths)")
	}

	// parse the filter patterns
	cooked.includePatterns = parsePatterns(raw.include)
	cooked.excludePatterns = parsePatterns(raw.exclude)
	cooked.excludePaths = parsePatterns(raw.excludePath)

	// parse the attribute filter patterns
	cooked.includeFileAttributes = parsePatterns(raw.includeFileAttributes)
	cooked.excludeFileAttributes = parsePatterns(raw.excludeFileAttributes)

	// SMB info is only preserved when both endpoints are SMB-aware.
	cooked.preserveSMBInfo = raw.preserveSMBInfo && areBothLocationsSMBAware(cooked.fromTo)
	if err = validatePreserveSMBPropertyOption(cooked.preserveSMBInfo, cooked.fromTo, nil, "preserve-smb-info"); err != nil {
		return cooked, err
	}

	isUserPersistingPermissions := raw.preserveSMBPermissions || raw.preservePermissions
	if cooked.preserveSMBInfo && !isUserPersistingPermissions {
		glcm.Info("Please note: the preserve-permissions flag is set to false, thus AzCopy will not copy SMB ACLs between the source and destination. To learn more: https://aka.ms/AzCopyandAzureFiles.")
	}
	if err = validatePreserveSMBPropertyOption(isUserPersistingPermissions, cooked.fromTo, nil, PreservePermissionsFlag); err != nil {
		return cooked, err
	}
	// TODO: the check on raw.preservePermissions on the next line can be removed once we have full support for these properties in sync
	// if err = validatePreserveOwner(raw.preserveOwner, cooked.fromTo); raw.preservePermissions && err != nil {
	// 	return cooked, err
	// }
	cooked.preservePermissions = common.NewPreservePermissionsOption(isUserPersistingPermissions, raw.preserveOwner, cooked.fromTo)

	cooked.preservePOSIXProperties = raw.preservePOSIXProperties
	if cooked.preservePOSIXProperties && !areBothLocationsPOSIXAware(cooked.fromTo) {
		return cooked, fmt.Errorf("in order to use --preserve-posix-properties, both the source and destination must be POSIX-aware (valid pairings are Linux->Blob, Blob->Linux, Blob->Blob)")
	}

	if err = cooked.compareHash.Parse(raw.compareHash); err != nil {
		return cooked, err
	}
	switch cooked.compareHash {
	case common.ESyncHashType.MD5():
		// Save any new MD5s on files we download.
		// Note: mutates raw so the assignment to cooked.putMd5 below picks this up.
		raw.putMd5 = true
	default: // no need to put a hash of any kind.
	}

	if err = common.LocalHashStorageMode.Parse(raw.localHashStorageMode); err != nil {
		return cooked, err
	}

	cooked.putMd5 = raw.putMd5
	if err = validatePutMd5(cooked.putMd5, cooked.fromTo); err != nil {
		return cooked, err
	}

	if err = cooked.md5ValidationOption.Parse(raw.md5ValidationOption); err != nil {
		return cooked, err
	}
	if err = validateMd5Option(cooked.md5ValidationOption, cooked.fromTo); err != nil {
		return cooked, err
	}

	if cooked.fromTo.IsS2S() {
		cooked.preserveAccessTier = raw.s2sPreserveAccessTier
	}

	// Check if user has provided `s2s-preserve-blob-tags` flag.
	// If yes, we have to ensure that both source and destination must be blob storages.
	if raw.s2sPreserveBlobTags {
		if cooked.fromTo.From() != common.ELocation.Blob() || cooked.fromTo.To() != common.ELocation.Blob() {
			return cooked, fmt.Errorf("either source or destination is not a blob storage. " +
				"blob index tags is a property of blobs only therefore both source and destination must be blob storage")
		}
		cooked.s2sPreserveBlobTags = raw.s2sPreserveBlobTags
	}

	cpkOptions := common.CpkOptions{}
	// Setting CPK-N: encryption-scope-by-name takes precedence and is mutually
	// exclusive with CPK-V (cpk-by-value).
	if raw.cpkScopeInfo != "" {
		if raw.cpkInfo {
			return cooked, fmt.Errorf("cannot use both cpk-by-name and cpk-by-value at the same time")
		}
		cpkOptions.CpkScopeInfo = raw.cpkScopeInfo
	}

	// Setting CPK-V
	// Get the key (EncryptionKey and EncryptionKeySHA256) value from environment variables when required.
	cpkOptions.CpkInfo = raw.cpkInfo

	// We only support transfer from source encrypted by user key when user wishes to download.
	// Due to service limitation, S2S transfer is not supported for source encrypted by user key.
	if cooked.fromTo.IsDownload() && (cpkOptions.CpkScopeInfo != "" || cpkOptions.CpkInfo) {
		glcm.Info("Client Provided Key for encryption/decryption is provided for download scenario. " +
			"Assuming source is encrypted.")
		cpkOptions.IsSourceEncrypted = true
	}
	cooked.cpkOptions = cpkOptions

	cooked.mirrorMode = raw.mirrorMode

	cooked.includeRegex = parsePatterns(raw.includeRegex)
	cooked.excludeRegex = parsePatterns(raw.excludeRegex)

	cooked.dryrunMode = raw.dryrun

	// Quiet/Essential output cannot coexist with options that require interactive
	// prompting or per-file dry-run output.
	if azcopyOutputVerbosity == common.EOutputVerbosity.Quiet() || azcopyOutputVerbosity == common.EOutputVerbosity.Essential() {
		if cooked.deleteDestination == common.EDeleteDestination.Prompt() {
			return cooked, fmt.Errorf("cannot set output level '%s' with delete-destination option '%s'", azcopyOutputVerbosity.String(), cooked.deleteDestination.String())
		} else if cooked.dryrunMode {
			return cooked, fmt.Errorf("cannot set output level '%s' with dry-run mode", azcopyOutputVerbosity.String())
		}
	}

	cooked.deleteDestinationFileIfNecessary = raw.deleteDestinationFileIfNecessary

	cooked.includeDirectoryStubs = raw.includeDirectoryStubs
	cooked.includeRoot = raw.includeRoot

	return cooked, nil
}