def recall_precision(gt, predictions, iou_threshold)

in mlebench/competitions/3d-object-detection-for-autonomous-vehicles/mAP_evaluation.py


import numpy as np

# group_by_key, wrap_in_box, Box3D, get_ious, and get_ap are helpers defined
# elsewhere in mAP_evaluation.py.


def recall_precision(gt, predictions, iou_threshold):
    """Compute recalls, precisions, and average precision for 3D box
    predictions against ground truth at a single IoU threshold."""
    num_gts = len(gt)

    # Group ground-truth annotations by sample and convert them to Box3D objects.
    image_gts = group_by_key(gt, "sample_token")
    image_gts = wrap_in_box(image_gts)

    # One "already matched" flag per ground-truth box, keyed by sample token.
    sample_gt_checked = {
        sample_token: np.zeros(len(boxes)) for sample_token, boxes in image_gts.items()
    }

    # Rank detections by confidence, highest first, as the AP computation requires.
    predictions = sorted(predictions, key=lambda x: x["score"], reverse=True)

    # Walk down the ranked detections and mark each one as a true or false positive.
    num_predictions = len(predictions)
    tp = np.zeros(num_predictions)
    fp = np.zeros(num_predictions)

    for prediction_index, prediction in enumerate(predictions):
        predicted_box = Box3D(**prediction)

        sample_token = prediction["sample_token"]

        # Best IoU seen for this detection, and the index of the gt box that gave it.
        max_overlap = -np.inf
        jmax = -1

        try:
            gt_boxes = image_gts[sample_token]  # ground-truth boxes for this sample
            gt_checked = sample_gt_checked[sample_token]  # matched flags for this sample
        except KeyError:
            # No ground truth in this sample: the detection will count as a false positive.
            gt_boxes = []
            gt_checked = None

        if len(gt_boxes) > 0:
            overlaps = get_ious(gt_boxes, predicted_box)
            max_overlap = np.max(overlaps)
            jmax = np.argmax(overlaps)  # index of the best-matching ground-truth box

        if max_overlap > iou_threshold:
            if gt_checked[jmax] == 0:
                # First detection to claim this ground-truth box: true positive.
                tp[prediction_index] = 1.0
                gt_checked[jmax] = 1
            else:
                # Duplicate detection of an already-matched box: false positive.
                fp[prediction_index] = 1.0
        else:
            # IoU below the threshold (or no ground truth at all): false positive.
            fp[prediction_index] = 1.0

    # Compute cumulative precision and recall over the ranked detections.
    fp = np.cumsum(fp, axis=0)
    tp = np.cumsum(tp, axis=0)

    recalls = tp / float(num_gts)

    assert np.all(0 <= recalls) and np.all(recalls <= 1)

    # Avoid divide-by-zero in case the first detection matches a difficult ground truth.
    precisions = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)

    assert np.all(0 <= precisions) and np.all(precisions <= 1)

    # Integrate the precision-recall curve into a single average-precision value.
    ap = get_ap(recalls, precisions)

    return recalls, precisions, ap
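
The helpers, including get_ap, live elsewhere in mAP_evaluation.py and are not shown here. For orientation, a standard PASCAL-VOC-style area-under-the-PR-curve computation, which is one plausible reading of what get_ap does, looks like this sketch:

def get_ap(recalls, precisions):
    """Sketch of a VOC-style AP: area under the interpolated PR curve.
    This is an assumed implementation, not necessarily the one in the file."""
    # Pad the curve so it starts at recall 0 and ends at recall 1.
    recalls = np.concatenate(([0.0], recalls, [1.0]))
    precisions = np.concatenate(([0.0], precisions, [0.0]))

    # Make the precision envelope monotonically non-increasing, right to left.
    for i in range(len(precisions) - 1, 0, -1):
        precisions[i - 1] = np.maximum(precisions[i - 1], precisions[i])

    # Sum rectangle areas at the recall values where the curve steps.
    indices = np.where(recalls[1:] != recalls[:-1])[0]
    return np.sum((recalls[indices + 1] - recalls[indices]) * precisions[indices + 1])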
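
A minimal usage sketch, assuming the competition's annotation format for the input dictionaries (sample_token, translation, size, rotation quaternion, class name, plus a confidence score on predictions); the token and values below are hypothetical:

# Hypothetical example data; only the field layout matters here.
box = {
    "sample_token": "token_0",
    "translation": [971.8, 1713.6, -25.8],  # box center in metres
    "size": [2.5, 7.8, 3.4],                # width, length, height
    "rotation": [1.0, 0.0, 0.0, 0.0],       # identity quaternion (w, x, y, z)
    "name": "truck",
}
gt = [box]
predictions = [{**box, "score": 0.9}]       # the same box, with a confidence score

recalls, precisions, ap = recall_precision(gt, predictions, iou_threshold=0.5)
print(recalls, precisions, ap)              # a perfect match should give ap == 1.0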