in jobsAdmin/init.go [498:641]
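// resurrectJobSummary rebuilds a ListJobSummaryResponse for the given job by re-reading the
// persisted plan files of every job part and tallying the current state of each transfer.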
func resurrectJobSummary(jm ste.IJobMgr) common.ListJobSummaryResponse {
js := common.ListJobSummaryResponse{
Timestamp: time.Now().UTC(),
JobID: jm.JobID(),
ErrorMsg: "",
JobStatus: common.EJobStatus.InProgress(), // Default
CompleteJobOrdered: false, // default to false; becomes true once the final job part has been ordered
FailedTransfers: []common.TransferDetail{},
}
// To avoid a race condition, get the overall status BEFORE we get the counts of completed files.
// (If we got it afterwards, we could hit a case where the counts haven't reached 100% done, but by the time we
// get the status the job IS finished, and so we would report completion with a lower total file count than the job really had.)
// Better to check the overall status first, and see it as uncompleted on this call (and completed on the next call).
part0, ok := jm.JobPartMgr(0)
if !ok {
panic(fmt.Errorf("error getting the 0th part of Job %s", jm.JobID()))
}
part0PlanStatus := part0.Plan().JobStatus()
// Now iterate over all job parts and rebuild the job summary by examining the current state of every transfer
jm.IterateJobParts(true, func(partNum common.PartNumber, jpm ste.IJobPartMgr) {
jpp := jpm.Plan()
js.CompleteJobOrdered = js.CompleteJobOrdered || jpp.IsFinalPart
js.TotalTransfers += jpp.NumTransfers
// Iterate through this job part's transfers
for t := uint32(0); t < jpp.NumTransfers; t++ {
// jppt is the memory-mapped transfer header for the transfer at index t of this job part
jppt := jpp.Transfer(t)
js.TotalBytesEnumerated += uint64(jppt.SourceSize)
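// Tally transfers by entity type (file, folder properties, symlink)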
switch jppt.EntityType {
case common.EEntityType.File():
js.FileTransfers++
case common.EEntityType.Folder():
js.FolderPropertyTransfers++
case common.EEntityType.Symlink():
js.SymlinkTransfers++
}
// classify the transfer by status, so the progress percentage can be calculated at the end
switch jppt.TransferStatus() {
case common.ETransferStatus.NotStarted(),
common.ETransferStatus.FolderCreated(),
common.ETransferStatus.Started(),
common.ETransferStatus.Restarted(),
common.ETransferStatus.Cancelled():
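// not-yet-complete (and cancelled) transfers still contribute their size to the expected byte total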
js.TotalBytesExpected += uint64(jppt.SourceSize)
case common.ETransferStatus.Success():
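// successful transfers count toward both the transferred and the expected byte totals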
js.TransfersCompleted++
js.TotalBytesTransferred += uint64(jppt.SourceSize)
js.TotalBytesExpected += uint64(jppt.SourceSize)
case common.ETransferStatus.Failed(),
common.ETransferStatus.TierAvailabilityCheckFailure(),
common.ETransferStatus.BlobTierFailure():
js.TransfersFailed++
// get the source and destination of the failed transfer at index t
src, dst, isFolder := jpp.TransferSrcDstStrings(t)
// append it to the list of failed transfers
js.FailedTransfers = append(js.FailedTransfers,
common.TransferDetail{
Src: src,
Dst: dst,
IsFolderProperties: isFolder,
TransferStatus: common.ETransferStatus.Failed(),
ErrorCode: jppt.ErrorCode()}) // TODO: Optimize
case common.ETransferStatus.SkippedEntityAlreadyExists(),
common.ETransferStatus.SkippedBlobHasSnapshots():
js.TransfersSkipped++
// get the source and destination of the skipped transfer at index t
src, dst, isFolder := jpp.TransferSrcDstStrings(t)
js.SkippedTransfers = append(js.SkippedTransfers,
common.TransferDetail{
Src: src,
Dst: dst,
IsFolderProperties: isFolder,
TransferStatus: jppt.TransferStatus(),
})
}
}
})
mu.Lock()
// Add the byte count from files currently in flight, to get a more accurate running total.
// The check prevents double counting.
if js.TotalBytesTransferred+jm.SuccessfulBytesInActiveFiles() <= js.TotalBytesExpected {
js.TotalBytesTransferred += jm.SuccessfulBytesInActiveFiles()
}
mu.Unlock()
if js.TotalBytesExpected == 0 {
// if no bytes are expected, avoid dividing by 0 (which would result in NaN) and just report 100%
js.PercentComplete = 100
} else {
js.PercentComplete = 100 * float32(js.TotalBytesTransferred) / float32(js.TotalBytesExpected)
}
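// guard against the percentage exceeding 100 due to accounting edge cases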
if js.PercentComplete > 100 {
js.PercentComplete = 100
}
// This lets the FE keep fetching the Job Progress Summary in the resume case. On resume, the Job
// is already completely ordered, so the progress summary should continue to be fetched until all
// job parts have been iterated and scheduled.
js.CompleteJobOrdered = js.CompleteJobOrdered || jm.AllTransfersScheduled()
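// raw count of bytes sent over the network so far, as tracked by JobsAdmin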
js.BytesOverWire = uint64(JobsAdmin.BytesOverWire())
// Get the number of active goroutines performing transfers or executing chunk funcs
// TODO: added for debugging purposes; remove later (this is covered by GetPerfInfo now anyway)
js.ActiveConnections = jm.ActiveConnections()
js.PerfStrings, js.PerfConstraint = jm.GetPerfInfo()
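// include pipeline network statistics, when available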
pipeStats := jm.PipelineNetworkStats()
if pipeStats != nil {
js.AverageIOPS = pipeStats.OperationsPerSecond()
js.AverageE2EMilliseconds = pipeStats.AverageE2EMilliseconds()
js.NetworkErrorPercentage = pipeStats.NetworkErrorPercentage()
js.ServerBusyPercentage = pipeStats.TotalServerBusyPercentage()
}
// If the status is Cancelled, there is no need to check CompleteJobOrdered,
// since the user must have consented to cancelling an incomplete job if that
// is the case.
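// The transfer direction and pipeline stats are needed for the performance advice below.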
dir := jm.TransferDirection()
p := jm.PipelineNetworkStats()
if part0PlanStatus == common.EJobStatus.Cancelled() {
js.JobStatus = part0PlanStatus
js.PerformanceAdvice = JobsAdmin.TryGetPerformanceAdvice(js.TotalBytesExpected, js.TotalTransfers-js.TransfersSkipped, part0.Plan().FromTo, dir, p)
return js
}
// The Job is completed if the Job order is complete AND ALL transfers are completed/failed.
// Regardless of active or inactive state, the job order is considered complete once the final part of the job has been ordered.
if (js.CompleteJobOrdered) && (part0PlanStatus.IsJobDone()) {
js.JobStatus = part0PlanStatus
}
if js.JobStatus.IsJobDone() {
js.PerformanceAdvice = JobsAdmin.TryGetPerformanceAdvice(js.TotalBytesExpected, js.TotalTransfers-js.TransfersSkipped, part0.Plan().FromTo, dir, p)
}
return js
}