in threedod/benchmark_scripts/utils/eval_utils.py [0:0]
# (module level) this function relies on: import numpy as np
def compute_metrics(self):
    """Use accumulated predictions and ground truths to compute Average Precision.

    Precision, recall, and AP are computed per category, each category
    evaluated independently, and then averaged over all categories.

    Returns:
        ret_dict: dict with a '<class> Average Precision' and a
            '<class> Recall' entry per category, plus:
            mAP: float, mean AP over all categories.
            AR: float, mean recall over all categories.
    """
    # The accumulated detections/ground truths and the AP parameters are
    # assumed to be instance attributes (populated by step() and the
    # constructor, following the votenet-style APCalculator).
    pred_all = self.pred_map_cls  # {img_id: [(classname, bbox, score), ...]}
    gt_all = self.gt_map_cls      # {img_id: [(classname, bbox), ...]}
    ovthresh = self.ap_iou_thresh
    use_07_metric = False         # continuous VOC-style AP integration
    get_iou_func = get_iou_obb    # oriented-box IoU helper, assumed defined in this module

    # Regroup by class: pred[classname][img_id] -> [(bbox, score), ...]
    pred = {}
    gt = {}
    for img_id in pred_all.keys():
        for classname, bbox, score in pred_all[img_id]:
            if classname not in pred:
                pred[classname] = {}
            if img_id not in pred[classname]:
                pred[classname][img_id] = []
            # Create an empty ground-truth slot too, so classes that are
            # predicted but missing from the ground truth still get scored.
            if classname not in gt:
                gt[classname] = {}
            if img_id not in gt[classname]:
                gt[classname][img_id] = []
            pred[classname][img_id].append((bbox, score))
    for img_id in gt_all.keys():
        for classname, bbox in gt_all[img_id]:
            if classname not in gt:
                gt[classname] = {}
            if img_id not in gt[classname]:
                gt[classname][img_id] = []
            gt[classname][img_id].append(bbox)
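
    # Layout after regrouping (ids below are hypothetical; class keys are
    # whatever ids step() accumulated, typically integer class indices):
    #   pred[3] == {'scan_0001': [(bbox_a, 0.91), (bbox_b, 0.42)]}
    #   gt[3]   == {'scan_0001': [bbox_gt]}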
    rec = {}
    prec = {}
    ap = {}
    for classname in gt.keys():
        print('Computing AP for class:', classname)
        # pred.get(...) guards against classes with ground truth but no
        # detections at all (a plain pred[classname] would raise KeyError).
        rec[classname], prec[classname], ap[classname] = eval_det_cls(
            pred.get(classname, {}), gt[classname],
            ovthresh, use_07_metric, get_iou_func)
        print(classname, ap[classname])
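
    # eval_det_cls returns cumulative recall/precision arrays over the
    # score-ranked detections plus a scalar AP; rec[classname][-1] (used
    # below) is therefore the final recall over all detections.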
    ret_dict = {}
    for key in sorted(ap.keys()):
        clsname = self.class2type_map[key] if self.class2type_map else str(key)
        ret_dict['%s Average Precision' % clsname] = ap[key]
    ret_dict['mAP'] = np.mean(list(ap.values()))

    rec_list = []
    for key in sorted(ap.keys()):
        clsname = self.class2type_map[key] if self.class2type_map else str(key)
        try:
            ret_dict['%s Recall' % clsname] = rec[key][-1]
            rec_list.append(rec[key][-1])
        except (KeyError, IndexError):
            # No detections for this class: its recall array is empty or
            # missing, so report zero recall.
            ret_dict['%s Recall' % clsname] = 0
            rec_list.append(0)
    ret_dict['AR'] = np.mean(rec_list)
    return ret_dict
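
A minimal usage sketch, assuming this method lives on the votenet-style
APCalculator in this file (constructor and step() signatures below follow
that convention and are assumptions; the corner-format box and the import
path, taken from the file location above, are illustrative):

    import numpy as np
    from threedod.benchmark_scripts.utils.eval_utils import APCalculator

    # A unit cube as 8 corners -- the corner-based box format the IoU
    # helpers expect. Feeding the same box as prediction and ground truth
    # should yield AP = 1.0 for class 0.
    box = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
                    [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]],
                   dtype=np.float32)

    calc = APCalculator(ap_iou_thresh=0.25)
    calc.step(batch_pred_map_cls=[[(0, box, 0.9)]],  # one scan, one detection
              batch_gt_map_cls=[[(0, box)]])         # one matching ground truth
    print(calc.compute_metrics())  # expect {'0 Average Precision': 1.0, ...}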