in lib/utils/metrics.py
def calculate_and_log_all_metrics_test(
        self, curr_iter, timer, total_iters, suffix=''):
    """Calculate and log metrics for testing."""
    # To be safe, we only trust what we load from workspace.
    cur_batch_size = get_batch_size_from_workspace()
    self.aggr_batch_size += cur_batch_size

    # Gather this iteration's outputs from all GPUs.
    (preds, labels,
     original_boxes, metadata) = get_multi_gpu_outputs(self.split, suffix)

    # Predictions and labels are accumulated for every batch; the boxes
    # and metadata are only kept in the multi-label case below.
    self.all_preds.append(preds)
    self.all_labels.append(labels)
    if cfg.MODEL.MULTI_LABEL:
        # Multi-label evaluation is computed over the full test set after
        # the last iteration, so just accumulate the raw outputs here.
        self.all_original_boxes.append(original_boxes)
        self.all_metadata.append(metadata)
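        # NOTE (assumption, not from this file): the accumulated outputs
        # are presumably concatenated once testing finishes and scored
        # with a dataset-level metric such as mAP, roughly:
        #
        #     all_preds = np.vstack(self.all_preds)    # hypothetical
        #     all_labels = np.vstack(self.all_labels)  # hypothetical
        #     map_score = compute_map(all_preds, all_labels)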
    else:
        # Single-label evaluation: compute multi-GPU top-1/top-5 accuracy
        # for this batch, convert it to an error percentage, and
        # accumulate it weighted by the batch size.
        accuracy_metrics = compute_multi_gpu_topk_accuracy(
            top_k=1, split=self.split, suffix=suffix)
        accuracy5_metrics = compute_multi_gpu_topk_accuracy(
            top_k=5, split=self.split, suffix=suffix)
        cur_err = (1.0 - accuracy_metrics['topk_accuracy']) * 100
        cur_err5 = (1.0 - accuracy5_metrics['topk_accuracy']) * 100
        self.aggr_err += cur_err * cur_batch_size
        self.aggr_err5 += cur_err5 * cur_batch_size
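        # The aggregates above implement a batch-size-weighted running
        # mean: e.g. two batches of sizes 64 and 32 with top-1 errors
        # 30.0 and 36.0 yield (30.0 * 64 + 36.0 * 32) / (64 + 32) = 32.0.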
    # Log every LOG_PERIOD iterations and on the final iteration.
    if (curr_iter + 1) % cfg.LOG_PERIOD == 0 \
            or curr_iter + 1 == total_iters:
        test_str = ' '.join((
            '| Test: [{}/{}]',
            ' Time {:0.3f}',
            ' current batch {}',
            ' aggregated batch {}',
        )).format(
            curr_iter + 1, total_iters,
            timer.diff,
            cur_batch_size, self.aggr_batch_size,
        )
        if not cfg.MODEL.MULTI_LABEL:
            # Report "current (aggregated)" top-1/top-5 error; cur_err and
            # cur_err5 are always defined here since MULTI_LABEL is false.
            test_str += (' top1 {:7.3f} ({:7.3f})'
                         ' top5 {:7.3f} ({:7.3f})').format(
                cur_err, self.aggr_err / self.aggr_batch_size,
                cur_err5, self.aggr_err5 / self.aggr_batch_size,
            )
        print(test_str)
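
# Usage sketch (a minimal illustration, not from this file: the
# `MetricsCalculator` name, the Caffe2 `test_model` net, and a `Timer`
# with tic()/toc() that populates `diff` are all assumptions):
#
#     from caffe2.python import workspace
#
#     metrics = MetricsCalculator(split='test')
#     timer = Timer()
#     for curr_iter in range(total_iters):
#         timer.tic()
#         workspace.RunNet(test_model.net.Proto().name)
#         timer.toc()
#         metrics.calculate_and_log_all_metrics_test(
#             curr_iter, timer, total_iters)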