in maskrcnn_benchmark/data/datasets/evaluation/cityscapes/eval_instances.py [0:0]
def matchGtWithPred(dataset, predictions, idx):
    """Match ground-truth instances against predicted instances for one image.

    Builds, for every (gt, pred) pair whose boxes intersect, a cross-link:
    a copy of the prediction (with intersection scores attached) is appended
    to gt["matchedPred"], and a copy of the gt is appended to
    pred["matchedGt"].  Instances are then grouped by class name.

    Args:
        dataset: dataset object exposing ``CLASSES`` (iterable of class
            names) and ``id_to_name`` (labelID -> class name mapping).
        predictions: per-image predictions, consumed by ``preparePredImage``.
        idx: index of the image to evaluate.

    Returns:
        dict with keys "groundTruth" and "prediction", each mapping a class
        name to the list of instance dicts of that class.
    """
    # Collect instances from gt and pred separately per image
    # TODO: not parallel! parallelize this process safely
    perImgGtInstances, gtMasks = prepareGtImage(dataset, idx)
    perImgPredInstances, predMasks = preparePredImage(dataset, predictions, idx)
    # If no masks are provided, the segmentation score will be 0
    for gt, gtMask in zip(perImgGtInstances, gtMasks):
        for pred, predMask in zip(perImgPredInstances, predMasks):
            # Cheap bounding-box test first; skip pairs that cannot overlap.
            if not isOverlapping(gt["box"], pred["box"]):
                continue
            boxIntersection = computeBoxIntersection(gt, pred)
            if boxIntersection <= 0:
                continue
            # Only compute the (more expensive) mask intersection for pairs
            # that actually intersect by box.  NOTE(review): assumes
            # computeMaskIntersection is side-effect free — confirm.
            maskIntersection = computeMaskIntersection(gt, gtMask, pred, predMask)
            # Copy metadata only, and register the matched pairs
            # this step is redundant but informative
            # intersection score would be enough
            gtCopy = gt.copy()
            predCopy = pred.copy()
            # remove linking field (an empty list) to avoid confusion
            gtCopy.pop("matchedPred")
            predCopy.pop("matchedGt")
            gtCopy["boxIntersection"] = boxIntersection
            gtCopy["maskIntersection"] = maskIntersection
            predCopy["boxIntersection"] = boxIntersection
            predCopy["maskIntersection"] = maskIntersection
            gt["matchedPred"].append(predCopy)
            pred["matchedGt"].append(gtCopy)
    # Group by classes
    groupedGtInstances = {labelName: [] for labelName in dataset.CLASSES}
    groupedPredInstances = {labelName: [] for labelName in dataset.CLASSES}
    for gt in perImgGtInstances:
        gtLabelName = dataset.id_to_name[gt["labelID"]]
        groupedGtInstances[gtLabelName].append(gt)
    for pred in perImgPredInstances:
        predLabelName = dataset.id_to_name[pred["labelID"]]
        groupedPredInstances[predLabelName].append(pred)
    match = {"groundTruth": groupedGtInstances, "prediction": groupedPredInstances}
    return match