in siammot/eval/eval_det_ap.py [0:0]
def get_ap(vid_class_gt: DataSample, vid_class_pred: DataSample, filter_fn, eval_frame_idxs, iou_thresh=(0.5,)):
    """
    Collect per-frame matching statistics for AP evaluation of a single class.

    :param vid_class_gt: the ground truths for a specific class, in DataSample format
    :param vid_class_pred: the predictions for a specific class, in DataSample format
    :param filter_fn: a callable used to filter out entities, or None to skip
        filtering. It is expected to return a ``(kept, ignored)`` pair; it is
        called once on gt entities (with ``meta_data=``) and once on predicted
        entities (with the ignored gt entities as the second positional
        argument) — presumably to drop detections inside ignore regions;
        confirm against the callers' filter implementation.
    :param eval_frame_idxs: the frame indexes where evaluation happens
    :param iou_thresh: a single IoU threshold or a list/tuple of thresholds
        that determine whether a detection is a TP
    :returns:
        vid_scores: per-threshold lists of confidences for every predicted entity
        vid_pr_ious: per-threshold lists of the IoU between each predicted
            entity and its matching gt entity
        vid_gt_ious: per-threshold lists of the IoU between each gt entity and
            its matching predicted entity
    """
    # The default is a tuple rather than a list to avoid the mutable-default-
    # argument pitfall; scalars are wrapped here, and a tuple of thresholds is
    # now accepted as well (previously a tuple was wrapped whole, breaking the
    # per-threshold loop below).
    if not isinstance(iou_thresh, (list, tuple)):
        iou_thresh = [iou_thresh]

    vid_scores = [[] for _ in iou_thresh]
    vid_pr_ious = [[] for _ in iou_thresh]
    vid_gt_ious = [[] for _ in iou_thresh]

    for frame_idx in eval_frame_idxs:
        gt_entities = vid_class_gt.get_entities_for_frame_num(frame_idx)
        pred_entities = vid_class_pred.get_entities_for_frame_num(frame_idx)

        # Remove detections for evaluation that are within ignore regions
        if filter_fn is not None:
            # Filter out ignored gt entities
            gt_entities, ignore_gt_entities = filter_fn(gt_entities, meta_data=vid_class_gt.metadata)
            # Filter out predicted entities that overlap with ignored gt entities
            pred_entities, ignore_pred_entities = filter_fn(pred_entities, ignore_gt_entities)

        # Sort predictions by descending confidence so that matching considers
        # the most confident detections first.
        pred_entities = sorted(pred_entities, key=lambda x: x.confidence, reverse=True)
        iou_matrix = bbs_iou(pred_entities, gt_entities)
        scores = [entity.confidence for entity in pred_entities]

        for i, _iou in enumerate(iou_thresh):
            # Hand greedy_matching a fresh copy per threshold in case it
            # mutates the matrix while matching.
            pred_ious, gt_ious = greedy_matching(copy.deepcopy(iou_matrix), _iou)
            vid_scores[i] += scores
            vid_pr_ious[i] += pred_ious
            vid_gt_ious[i] += gt_ious

    return vid_scores, vid_pr_ious, vid_gt_ious