in mapillary_vistas/evaluation/evaluation.py [0:0]
def evaluate_dirs(labels, args):
    """
    Evaluate the given command line parameters and print/plot the results.

    Walks ``args.ground_truth_labels`` for ``.png`` label images, pairs each
    with the corresponding prediction image (``args.prediction_labels``) and
    instance file (``args.instances``, ``.txt``) by mirroring the relative
    path, accumulates per-image results (serially or via a worker pool,
    depending on ``args.jobs``), then prints/plots confusion matrices, IoUs
    and instance-level average precision.

    :param labels: list of label dicts; each must provide a ``'name'`` key.
    :param args: parsed command line namespace (see flags referenced below).
    """
    prediction_dir = None
    if args.prediction_labels is not None:
        prediction_dir = os.path.abspath(args.prediction_labels)
    ground_truth_label_dir = os.path.abspath(args.ground_truth_labels)
    instance_dir = None
    if args.instances is not None:
        instance_dir = os.path.abspath(args.instances)
    jobs = args.jobs

    print("Parsing directories for images...")
    image_tuples = []
    for (path, _, files) in os.walk(ground_truth_label_dir):
        for ground_truth_file in files:
            # ignore non png predictions
            if not ground_truth_file.endswith('.png'):
                continue
            ground_truth_path = os.path.join(path, ground_truth_file)
            # Mirror the path relative to the ground-truth root into the
            # other directories. (os.path.relpath + join is used instead of
            # str.replace, which would substitute *every* occurrence of the
            # root prefix and corrupt paths that repeat it.)
            relative_path = os.path.relpath(
                ground_truth_path, ground_truth_label_dir)

            prediction_path = None
            if prediction_dir is not None:
                prediction_path = os.path.join(prediction_dir, relative_path)

            if instance_dir is not None:
                instance_path = os.path.join(instance_dir, relative_path)
                # instance information lives in .txt files next to the labels
                instance_path = os.path.splitext(instance_path)[0] + '.txt'
            else:
                instance_path = None

            image_tuples.append({
                'ground_truth': ground_truth_path,
                'prediction': prediction_path,
                'instances': instance_path,
            })

    print("Found {} predictions with ground truth".format(len(image_tuples)))

    # initialize result structures
    confusion_matrix = None
    instance_specific_pixel_information = {}
    instance_specific_instance_information = []
    for label in labels:
        instance_specific_pixel_information[label['name']] = {
            'raw_true_positives': 0,
            'weighted_true_positives': 0,
            'raw_false_negatives': 0,
            'weighted_false_negatives': 0,
        }

    print("Analysing predictions")
    if jobs == 1:
        # if only one job is allowed, use the current process to
        # improve signal handling and backtrace information in case
        # of errors
        for files in progress(image_tuples):
            result = process_image(labels, files)
            confusion_matrix, \
                instance_specific_pixel_information, \
                instance_specific_instance_information = \
                add_result(
                    result,
                    confusion_matrix,
                    instance_specific_pixel_information,
                    instance_specific_instance_information
                )
    else:
        # jobs can be a number or None (in which case all cores will be used)
        pool = Pool(processes=jobs)
        pool_args = zip(itertools.repeat(labels, len(image_tuples)), image_tuples)
        results = pool.imap_unordered(process_image_unpack_args, pool_args)
        for result in progress(results, total=len(image_tuples)):
            confusion_matrix, \
                instance_specific_pixel_information, \
                instance_specific_instance_information = \
                add_result(
                    result,
                    confusion_matrix,
                    instance_specific_pixel_information,
                    instance_specific_instance_information
                )
        pool.close()
        pool.join()

    if len(instance_specific_instance_information) > 0:
        print("Calculating instance specific accuracy")
        precisions, precisions_50 = calculate_average_precision(instance_specific_instance_information, labels, args)
        print_precisions(labels, precisions, precisions_50)

    if confusion_matrix is not None:
        # print the results according to command line parameters
        reduced_labels, reduced_confusion_matrix, reduced_instance_specific_pixel_information = reduce_evaluation_to_evaluated_categories(labels, confusion_matrix, instance_specific_pixel_information)

        percentage = not args.print_absolute_confusion_matrix

        # Keep labels, confusion matrix and per-label instance information
        # consistent with each other: full goes with full, reduced with
        # reduced. (Previously these pairings were swapped, so print_ious
        # received instance info keyed by the wrong label set.)
        if args.print_full_confusion_matrix:
            labels_for_printing = labels
            confusion_matrix_for_printing = confusion_matrix
            instance_specific_information_for_printing = instance_specific_pixel_information
        else:
            labels_for_printing = reduced_labels
            confusion_matrix_for_printing = reduced_confusion_matrix
            instance_specific_information_for_printing = reduced_instance_specific_pixel_information

        if args.plot:
            plot_confusion_matrix(labels_for_printing, confusion_matrix_for_printing, args.plot_dir, "confusion_matrix", args.plot_extension)
        print_confusion_matrix(labels_for_printing, confusion_matrix_for_printing, percent=percentage)
        print_ious(labels_for_printing, confusion_matrix_for_printing, instance_specific_information_for_printing)

        # meta level 2 (coarser category grouping)
        meta_labels, meta_confusion_matrix, meta_instance = reduce_evaluation_to_metalevel(labels_for_printing, confusion_matrix_for_printing, instance_specific_information_for_printing, 2)
        if args.plot:
            plot_confusion_matrix(meta_labels, meta_confusion_matrix, args.plot_dir, "confusion_matrix_meta_2", args.plot_extension)
        print_confusion_matrix(meta_labels, meta_confusion_matrix, percent=percentage)
        print_ious(meta_labels, meta_confusion_matrix, meta_instance)

        # meta level 1 (coarsest category grouping)
        meta_labels, meta_confusion_matrix, meta_instance = reduce_evaluation_to_metalevel(labels_for_printing, confusion_matrix_for_printing, instance_specific_information_for_printing, 1)
        if args.plot:
            plot_confusion_matrix(meta_labels, meta_confusion_matrix, args.plot_dir, "confusion_matrix_meta_1", args.plot_extension)
        print_confusion_matrix(meta_labels, meta_confusion_matrix, percent=percentage)
        print_ious(meta_labels, meta_confusion_matrix, meta_instance)