maskrcnn_benchmark/data/datasets/evaluation/cityscapes/eval_instances.py [543:616]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                            # NOTE(review): this excerpt starts mid-function — the
                            # enclosing loops and the definitions of curTrue/curScore,
                            # haveGt, havePred, hardFns, ap, and dI/lI/oI lie above
                            # this view; comments below are hedged accordingly.
                            if proportionIgnore <= overlapTh:
                                # Prediction is not sufficiently covered by ignore
                                # regions, so count it as a false positive (label 0)
                                # at its confidence score.
                                curTrue = np.append(curTrue, 0)
                                confidence = pred["confidence"]
                                curScore = np.append(curScore, confidence)

                    # append to overall results
                    y_true = np.append(y_true, curTrue)
                    y_score = np.append(y_score, curScore)

                # compute the average precision
                if haveGt and havePred:
                    # compute precision recall curve first

                    # sorting and cumsum
                    # Sort ascending by confidence; the cumulative sum then counts
                    # how many positives lie at or below each score position.
                    scoreArgSort = np.argsort(y_score)
                    yScoreSorted = y_score[scoreArgSort]
                    yTrueSorted = y_true[scoreArgSort]
                    yTrueSortedCumsum = np.cumsum(yTrueSorted)

                    # unique thresholds
                    # uniqueIndices holds the index of the first occurrence of each
                    # distinct score in the sorted array — one PR point per threshold.
                    (thresholds, uniqueIndices) = np.unique(
                        yScoreSorted, return_index=True
                    )

                    # since we need to add an artificial point to the precision-recall curve
                    # increase its length by 1
                    nbPrecRecall = len(uniqueIndices) + 1

                    # prepare precision recall
                    nbExamples = len(yScoreSorted)
                    nbTrueExamples = yTrueSortedCumsum[-1]
                    precision = np.zeros(nbPrecRecall)
                    recall = np.zeros(nbPrecRecall)

                    # deal with the first point
                    # only thing we need to do, is to append a zero to the cumsum at the end.
                    # an index of -1 uses that zero then
                    yTrueSortedCumsum = np.append(yTrueSortedCumsum, 0)

                    # deal with remaining
                    for idxRes, idxScores in enumerate(uniqueIndices):
                        # Treat everything at or above this score threshold as a
                        # positive prediction:
                        #   tp = positives at/above the threshold
                        #   fp = remaining predictions at/above the threshold
                        #   fn = positives below the threshold, plus hardFns
                        #        (hard false negatives accumulated above this
                        #        excerpt — TODO confirm against full function).
                        cumSum = yTrueSortedCumsum[idxScores - 1]
                        tp = nbTrueExamples - cumSum
                        fp = nbExamples - idxScores - tp
                        fn = cumSum + hardFns
                        p = float(tp) / (tp + fp)
                        r = float(tp) / (tp + fn)
                        precision[idxRes] = p
                        recall[idxRes] = r

                    # first point in curve is artificial
                    precision[-1] = 1.0
                    recall[-1] = 0.0

                    # compute average of precision-recall curve
                    # integration is performed via zero order, or equivalently step-wise integration
                    # first compute the widths of each step:
                    # use a convolution with appropriate kernel, manually deal with the boundaries first
                    # Pad with a duplicated first recall and a trailing 0.0 so the
                    # 'valid' convolution yields one width per precision point.
                    recallForConv = np.copy(recall)
                    recallForConv = np.append(recallForConv[0], recallForConv)
                    recallForConv = np.append(recallForConv, 0.0)

                    # np.convolve flips the kernel, so each output element is half
                    # the recall difference between a point's two neighbours.
                    stepWidths = np.convolve(recallForConv, [-0.5, 0, 0.5], "valid")

                    # integrate is now simply a dot product
                    apCurrent = np.dot(precision, stepWidths)

                elif haveGt:
                    # Ground truth exists but no matching predictions -> AP is 0.
                    apCurrent = 0.0
                else:
                    # Neither ground truth nor predictions -> AP is undefined.
                    apCurrent = float("nan")
                # Store per (distance?, label, overlap) cell — the loop variables
                # dI/lI/oI are defined above this excerpt; verify their meaning
                # against the full function.
                ap[dI, lI, oI] = apCurrent

    return ap
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



maskrcnn_benchmark/data/datasets/evaluation/cityscapes/eval_instances.py [747:820]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                            # NOTE(review): this excerpt starts mid-function — the
                            # enclosing loops and the definitions of curTrue/curScore,
                            # haveGt, havePred, hardFns, ap, and dI/lI/oI lie above
                            # this view; comments below are hedged accordingly.
                            if proportionIgnore <= overlapTh:
                                # Prediction is not sufficiently covered by ignore
                                # regions, so count it as a false positive (label 0)
                                # at its confidence score.
                                curTrue = np.append(curTrue, 0)
                                confidence = pred["confidence"]
                                curScore = np.append(curScore, confidence)

                    # append to overall results
                    y_true = np.append(y_true, curTrue)
                    y_score = np.append(y_score, curScore)

                # compute the average precision
                if haveGt and havePred:
                    # compute precision recall curve first

                    # sorting and cumsum
                    # Sort ascending by confidence; the cumulative sum then counts
                    # how many positives lie at or below each score position.
                    scoreArgSort = np.argsort(y_score)
                    yScoreSorted = y_score[scoreArgSort]
                    yTrueSorted = y_true[scoreArgSort]
                    yTrueSortedCumsum = np.cumsum(yTrueSorted)

                    # unique thresholds
                    # uniqueIndices holds the index of the first occurrence of each
                    # distinct score in the sorted array — one PR point per threshold.
                    (thresholds, uniqueIndices) = np.unique(
                        yScoreSorted, return_index=True
                    )

                    # since we need to add an artificial point to the precision-recall curve
                    # increase its length by 1
                    nbPrecRecall = len(uniqueIndices) + 1

                    # prepare precision recall
                    nbExamples = len(yScoreSorted)
                    nbTrueExamples = yTrueSortedCumsum[-1]
                    precision = np.zeros(nbPrecRecall)
                    recall = np.zeros(nbPrecRecall)

                    # deal with the first point
                    # only thing we need to do, is to append a zero to the cumsum at the end.
                    # an index of -1 uses that zero then
                    yTrueSortedCumsum = np.append(yTrueSortedCumsum, 0)

                    # deal with remaining
                    for idxRes, idxScores in enumerate(uniqueIndices):
                        # Treat everything at or above this score threshold as a
                        # positive prediction:
                        #   tp = positives at/above the threshold
                        #   fp = remaining predictions at/above the threshold
                        #   fn = positives below the threshold, plus hardFns
                        #        (hard false negatives accumulated above this
                        #        excerpt — TODO confirm against full function).
                        cumSum = yTrueSortedCumsum[idxScores - 1]
                        tp = nbTrueExamples - cumSum
                        fp = nbExamples - idxScores - tp
                        fn = cumSum + hardFns
                        p = float(tp) / (tp + fp)
                        r = float(tp) / (tp + fn)
                        precision[idxRes] = p
                        recall[idxRes] = r

                    # first point in curve is artificial
                    precision[-1] = 1.0
                    recall[-1] = 0.0

                    # compute average of precision-recall curve
                    # integration is performed via zero order, or equivalently step-wise integration
                    # first compute the widths of each step:
                    # use a convolution with appropriate kernel, manually deal with the boundaries first
                    # Pad with a duplicated first recall and a trailing 0.0 so the
                    # 'valid' convolution yields one width per precision point.
                    recallForConv = np.copy(recall)
                    recallForConv = np.append(recallForConv[0], recallForConv)
                    recallForConv = np.append(recallForConv, 0.0)

                    # np.convolve flips the kernel, so each output element is half
                    # the recall difference between a point's two neighbours.
                    stepWidths = np.convolve(recallForConv, [-0.5, 0, 0.5], "valid")

                    # integrate is now simply a dot product
                    apCurrent = np.dot(precision, stepWidths)

                elif haveGt:
                    # Ground truth exists but no matching predictions -> AP is 0.
                    apCurrent = 0.0
                else:
                    # Neither ground truth nor predictions -> AP is undefined.
                    apCurrent = float("nan")
                # Store per (distance?, label, overlap) cell — the loop variables
                # dI/lI/oI are defined above this excerpt; verify their meaning
                # against the full function.
                ap[dI, lI, oI] = apCurrent

    return ap
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



