in vision/amazon-sagemaker-pytorch-detectron2/container_training/sku-110k/evaluation/coco.py
def evaluate(self):
"""
Run per image evaluation on given images and store results in self.evalImgs_cpp, a
datastructure that isn't readable from Python but is used by a c++ implementation of
accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
self.evalImgs because this datastructure is a computational bottleneck.
:return: None
"""
tic = time.time()
print("Running per image evaluation...")
params = self.params
# add backward compatibility if useSegm is specified in params
if params.useSegm is not None:
params.iouType = "segm" if params.useSegm == 1 else "bbox"
print(
"useSegm (deprecated) is not None. Running {} evaluation".format(
params.iouType
)
)
print("Evaluate annotation type *{}*".format(params.iouType))
params.imgIds = list(np.unique(params.imgIds))
if params.useCats:
params.catIds = list(np.unique(params.catIds))
params.maxDets = sorted(params.maxDets)
self.params = params
self._prepare()
# loop through images, area range, max detection number
cat_ids = params.catIds if params.useCats else [-1]
if params.iouType == "segm" or params.iouType == "bbox":
compute_IoU = self.computeIoU
elif params.iouType == "keypoints":
compute_IoU = self.computeOks
else:
raise NotImplementedError(f"Add implementation for {params.iouType}")
self.ious = {
(imgId, catId): compute_IoU(imgId, catId)
for imgId in params.imgIds
for catId in cat_ids
}
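# Each self.ious value follows the pycocotools convention: an IoU matrix of
# shape (num_detections, num_ground_truths) for one (image, category) pair
# (detections sorted by score and truncated to maxDets[-1]), or an empty
# list when there is nothing to match.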
maxDet = params.maxDets[-1]
# <<<< Beginning of code differences with original COCO API
def convert_instances_to_cpp(instances, is_det=False):
# Convert annotations for a list of instances in an image to a format that's fast
# to access in C++
instances_cpp = []
for instance in instances:
instance_cpp = _C.InstanceAnnotation(
int(instance["id"]),
instance["score"] if is_det else instance.get("score", 0.0),
instance["area"],
bool(instance.get("iscrowd", 0)),
bool(instance.get("ignore", 0)),
)
instances_cpp.append(instance_cpp)
return instances_cpp
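# Illustration (assumed values, not from the original file): a ground-truth
# dict such as {"id": 7, "area": 1024.0, "iscrowd": 0} maps to
# _C.InstanceAnnotation(7, 0.0, 1024.0, False, False), while a detection
# additionally supplies its confidence through the required "score" key.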
# Convert GT annotations, detections, and IoUs to a format that's fast to access in C++
ground_truth_instances = [
[
convert_instances_to_cpp(self._gts[imgId, catId])
for catId in params.catIds
]
for imgId in params.imgIds
]
detected_instances = [
[
convert_instances_to_cpp(self._dts[imgId, catId], is_det=True)
for catId in params.catIds
]
for imgId in params.imgIds
]
ious = [
[self.ious[imgId, catId] for catId in cat_ids] for imgId in params.imgIds
]
if not params.useCats:
# For each image, flatten per-category lists into a single list
ground_truth_instances = [
[[o for c in i for o in c]] for i in ground_truth_instances
]
detected_instances = [
[[o for c in i for o in c]] for i in detected_instances
]
# Call the C++ implementation of self.evaluateImg()
self._evalImgs_cpp = _C.COCOevalEvaluateImages(
params.areaRng,
maxDet,
params.iouThrs,
ious,
ground_truth_instances,
detected_instances,
)
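# _evalImgs_cpp is an opaque buffer that only the C++ accumulate() can read;
# the pure-Python per-image results are intentionally left unpopulated.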
self._evalImgs = None
self._paramsEval = copy.deepcopy(self.params)
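# >>>> End of code differences with original COCO API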
toc = time.time()
print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))