# run_evaluation() — defined in lib/utils/ava_eval_helper.py


def run_evaluation(categories, groundtruth, detections,
                   excluded_keys, verbose=True):
    """Run PASCAL-style AVA evaluation over ground truth and detections.

    Args:
        categories: list of category dicts accepted by
            ``object_detection_evaluation.PascalDetectionEvaluator``
            (presumably each has at least 'id' and 'name' — confirm against
            the evaluator's API).
        groundtruth: 3-tuple ``(boxes, labels, _)`` of dicts keyed by
            image/timestamp key; ``boxes[key]`` is a list of boxes and
            ``labels[key]`` the matching class ids (third element is unused).
        detections: 3-tuple ``(boxes, labels, scores)`` of dicts keyed the
            same way as ``groundtruth``.
        excluded_keys: collection of keys to skip in both ground truth and
            detections (membership-tested with ``in``).
        verbose: if True (default), log each skipped key and pretty-print
            the resulting metrics. Previously this flag was accepted but
            ignored; honoring it is backward-compatible since the default
            preserves the old always-print behavior.

    Returns:
        The metrics dict produced by ``pascal_evaluator.evaluate()``.
    """
    pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
        categories)

    # --- Ground truth ---
    boxes, labels, _ = groundtruth

    for image_key in boxes:
        if image_key in excluded_keys:
            if verbose:
                logging.info(("Found excluded timestamp in ground truth: %s. "
                              "It will be ignored."), image_key)
            continue
        pascal_evaluator.add_single_ground_truth_image_info(
            image_key, {
                standard_fields.InputDataFields.groundtruth_boxes:
                    np.array(boxes[image_key], dtype=float),
                standard_fields.InputDataFields.groundtruth_classes:
                    np.array(labels[image_key], dtype=int),
                # All ground-truth boxes are treated as non-difficult.
                standard_fields.InputDataFields.groundtruth_difficult:
                    np.zeros(len(boxes[image_key]), dtype=bool)
            })

    # --- Detections ---
    boxes, labels, scores = detections

    for image_key in boxes:
        if image_key in excluded_keys:
            if verbose:
                logging.info(("Found excluded timestamp in detections: %s. "
                              "It will be ignored."), image_key)
            continue
        pascal_evaluator.add_single_detected_image_info(
            image_key, {
                standard_fields.DetectionResultFields.detection_boxes:
                    np.array(boxes[image_key], dtype=float),
                standard_fields.DetectionResultFields.detection_classes:
                    np.array(labels[image_key], dtype=int),
                standard_fields.DetectionResultFields.detection_scores:
                    np.array(scores[image_key], dtype=float)
            })

    metrics = pascal_evaluator.evaluate()

    if verbose:
        pprint.pprint(metrics, indent=2)
    return metrics