in sav_dataset/utils/sav_benchmark.py [0:0]
def __call__(self, vid_name: str) -> Tuple[str, Dict[str, float], Dict[str, float]]:
    """
    Evaluate every (ground-truth, prediction) mask pair found for one video.

    vid_name: name of the video to evaluate

    Returns a (vid_name, iou_output, boundary_f_output) tuple. For SA-V
    style folders the per-object scores are merged via self.consolidate;
    otherwise exactly one pair is expected and its scores are returned
    directly.
    """
    # Discover the (frames, obj_id, gt_path, pred_path) tuples to score,
    # and whether the directory layout follows the SA-V convention.
    to_evaluate, is_sav_format = self.scan_vid_folder(vid_name)

    # Score each object independently: one Evaluator per (gt, pred) pair.
    per_object_scores = []
    for frame_names, object_id, gt_dir, pred_dir in to_evaluate:
        if self.skip_first_and_last:
            # Exclude the first and last frames from scoring.
            frame_names = frame_names[1:-1]
        scorer = Evaluator(name=vid_name, obj_id=object_id)
        for frame_name in frame_names:
            gt_mask, pred_mask = self.get_gt_and_pred(
                gt_dir, pred_dir, frame_name, is_sav_format
            )
            scorer.feed_frame(mask=pred_mask, gt=gt_mask)
        j_score, f_score = scorer.conclude()
        per_object_scores.append((object_id, j_score, f_score))

    if not is_sav_format:
        # Non-SA-V layouts yield exactly one (gt, pred) pair; return its
        # scores without consolidation.
        assert len(per_object_scores) == 1
        _, iou_only, boundary_f_only = per_object_scores[0]
        return vid_name, iou_only, boundary_f_only

    iou_output, boundary_f_output = self.consolidate(per_object_scores)
    return vid_name, iou_output, boundary_f_output