def calculate_instance_specific_instance_accuracy_from_arrays()

in mapillary_vistas/evaluation/instance_specific_instance_level.py


import itertools
import os

import numpy as np
from PIL import Image

# calculate_iou_for_indices is a helper defined elsewhere in this package


def calculate_instance_specific_instance_accuracy_from_arrays(instance_predictions_path, ground_truth, labels):
    """
    Load all prediction masks for the current image, and match with all
    ground truth masks.
    """

    # reduce to label information
    ground_truth_labels = ground_truth // 256
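    # (the ground truth encodes each pixel as label_id * 256 + instance_id;
    # e.g. a raw value of 13571 decodes to label 53 and instance 3)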

    # mark ground truth pixels that are ignored;
    # hits on these pixels will not count in the evaluation.
    # in the same loop, create a dictionary for label lookup by name
    ground_truth_ignore_labels = np.zeros_like(ground_truth_labels, dtype=bool)
    label_indices = {}
    for index, label in enumerate(labels):
        label_indices[label['name']] = index
        if not label['evaluate']:
            ground_truth_ignore_labels[ground_truth_labels == index] = True

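    # each line of the instance file names one prediction:
    #     <path to instance mask> (<label id> | <label name>) <confidence>
    # lines starting with '#' are comments; a hypothetical example:
    #     masks/instance_000.png 55 0.97
    #     masks/instance_001.png car 0.88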
    try:
        with open(instance_predictions_path) as instance_file:
            instance_prediction_infos = {}
            for line in instance_file:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                mask_file, label, confidence = line.split(" ")

                # enable the loading of label id or label name
                try:
                    label_id = int(label)
                except ValueError:
                    label_id = label_indices[label]

                if label_id not in instance_prediction_infos:
                    instance_prediction_infos[label_id] = []
                instance_prediction_infos[label_id].append({
                    'mask': os.path.join(os.path.dirname(instance_predictions_path), mask_file),
                    'confidence': float(confidence),
                })
    except Exception:
        print("ERROR in {}".format(instance_predictions_path))
        print("Ensure each line of the instance file has the format")
        print("<path to instance mask> (<label id> | <label name>) <confidence>")
        raise

    # initialize result structures
    overlap_information = {}

    # the metric is label specific
    for label_id, label in enumerate(labels):
        if not label['evaluate']:
            continue
        if not label['instances']:
            continue

        if label_id not in instance_prediction_infos:
            instance_prediction_infos[label_id] = []

        # get list of instances of current label in current image
        # note that due to overlaps, the ids might not be sequential
        ground_truth_instance_ids = np.unique(ground_truth[ground_truth_labels == label_id] % 256)

        # cache the ground truth masks/sizes for faster processing
        ground_truth_instance_information = {}
        ground_truths = {}
        for instance_id in ground_truth_instance_ids:
            instance_indices = ground_truth == instance_id + label_id * 256
            ground_truth_instance_information[instance_id] = instance_indices
            ground_truths[instance_id] = {
                'size': np.count_nonzero(instance_indices)
            }

        # prediction ids are sequential, but use the same structure for readability
        prediction_instance_ids = range(len(instance_prediction_infos[label_id]))

        prediction_instance_information = {}
        predictions = {}
        for instance_id in prediction_instance_ids:
            instance_image = Image.open(instance_prediction_infos[label_id][instance_id]['mask'])
            instance_array = np.array(instance_image)
            instance_indices = instance_array != 0
            prediction_size = np.count_nonzero(instance_indices)

            # skip empty masks
            if prediction_size == 0:
                continue

            prediction_instance_information[instance_id] = instance_indices

            ignore_pixel_count = np.count_nonzero(np.logical_and(
                instance_indices,
                ground_truth_ignore_labels
            ))
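            # (presumably used downstream so predictions that fall on ignored
            # regions are not penalized)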

            predictions[instance_id] = {
                'confidence': instance_prediction_infos[label_id][instance_id]['confidence'],
                'ignore_pixel_count': ignore_pixel_count,
                'file': instance_prediction_infos[label_id][instance_id]['mask'],
                'size': prediction_size,
            }

        overlap_information[label_id] = {
            'ground_truths': ground_truths,
            'predictions': predictions,
            'ground_truth_overlaps': {},
            'prediction_overlaps': {},
            'file': instance_predictions_path,
        }

        # test every combination
        iterator = itertools.product(
            ground_truth_instance_information.keys(),
            prediction_instance_information.keys()
        )
        for ground_truth_id, prediction_id in iterator:
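            # calculate_iou_for_indices is expected to return a dict with the
            # pair's 'iou' (intersection over union of the two boolean masks)
            # plus the 'ground_truth_size' and 'prediction_size' passed in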
            overlap = calculate_iou_for_indices(
                ground_truth_instance_information[ground_truth_id],
                prediction_instance_information[prediction_id],
                ground_truths[ground_truth_id]['size'],
                predictions[prediction_id]['size']
            )

            # this information only needs to be stored once per instance, not per pair
            overlap.pop('ground_truth_size')
            overlap.pop('prediction_size')

            # only store pairs that actually overlap
            if overlap['iou'] > 0:
                if ground_truth_id not in overlap_information[label_id]['ground_truth_overlaps']:
                    overlap_information[label_id]['ground_truth_overlaps'][ground_truth_id] = {}
                if prediction_id not in overlap_information[label_id]['prediction_overlaps']:
                    overlap_information[label_id]['prediction_overlaps'][prediction_id] = {}

                # store the information in both directions
                overlap_information[label_id]['ground_truth_overlaps'][ground_truth_id][prediction_id] = overlap
                overlap_information[label_id]['prediction_overlaps'][prediction_id][ground_truth_id] = overlap
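                # (keeping both directions makes it cheap to look up every
                # prediction overlapping a given ground truth, and vice versa)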

    return overlap_information
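
A minimal usage sketch, assuming a Vistas-style labels list (each entry carries at least the name, evaluate, and instances keys the function reads above) and placeholder file paths:

import numpy as np
from PIL import Image

# hypothetical two-entry label config; a real run uses the dataset's full label definition
labels = [
    {'name': 'unlabeled', 'evaluate': False, 'instances': False},
    {'name': 'car', 'evaluate': True, 'instances': True},
]

# ground truth encoded as label_id * 256 + instance_id (placeholder path)
ground_truth = np.array(Image.open('ground_truth/0001.png'), dtype=np.int32)

overlaps = calculate_instance_specific_instance_accuracy_from_arrays(
    'predictions/0001.txt',  # placeholder instance file
    ground_truth,
    labels,
)

for label_id, info in overlaps.items():
    print(label_id, len(info['ground_truths']), len(info['predictions']))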