# engine/eval_detection.py [0:0]
def main_detection_evaluation(**kwargs):
    """Entry point for object-detection evaluation.

    Parses the detection-evaluation arguments, normalizes the options for
    single-process inference (device setup, DDP disabled, batch size of 1,
    sync-BN downgraded when fewer than 2 GPUs are available), and dispatches
    to one of three evaluation modes read from ``evaluation.detection.mode``:
    ``single_image``, ``image_folder``, or ``validation_set``.

    Args:
        **kwargs: Forwarded unchanged to the mode-specific prediction helper
            (``predict_image``, ``predict_images_in_folder``, or
            ``predict_labeled_dataset``).
    """
    opts = get_detection_eval_arguments()

    dataset_name = getattr(opts, "dataset.name", "imagenet")
    if dataset_name.find("coco") > -1:
        # replace model specific datasets (e.g., coco_ssd) with general COCO dataset
        setattr(opts, "dataset.name", "coco")

    # device set-up
    opts = device_setup(opts)

    node_rank = getattr(opts, "ddp.rank", 0)
    if node_rank < 0:
        # NOTE(review): logger.error is presumed fatal here (project convention);
        # execution should not continue with a negative rank.
        logger.error('--rank should be >=0. Got {}'.format(node_rank))

    is_master_node = is_master(opts)

    # create the directory for saving results
    save_dir = getattr(opts, "common.results_loc", "results")
    run_label = getattr(opts, "common.run_label", "run_1")
    exp_dir = '{}/{}'.format(save_dir, run_label)
    setattr(opts, "common.exp_loc", exp_dir)
    logger.log("Results (if any) will be stored here: {}".format(exp_dir))
    create_directories(dir_path=exp_dir, is_master_node=is_master_node)

    num_gpus = getattr(opts, "dev.num_gpus", 1)
    if num_gpus < 2:
        cls_norm_type = getattr(opts, "model.normalization.name", "batch_norm_2d")
        if cls_norm_type.find("sync") > -1:
            # replace sync_batch_norm with standard batch norm for
            # single-device (CPU / single-GPU) evaluation
            setattr(opts, "model.normalization.name", cls_norm_type.replace("sync_", ""))
            setattr(opts, "model.classification.normalization.name", cls_norm_type.replace("sync_", ""))

    # we disable the DDP setting for evaluation tasks
    setattr(opts, "ddp.use_distributed", False)

    # No of data workers = no of CPUs (if not specified or -1)
    n_cpus = multiprocessing.cpu_count()
    dataset_workers = getattr(opts, "dataset.workers", -1)
    if dataset_workers == -1:
        setattr(opts, "dataset.workers", n_cpus)

    # We are not performing any operation like resizing and cropping on images
    # Because image dimensions are different, we process 1 sample at a time.
    setattr(opts, "dataset.train_batch_size0", 1)
    setattr(opts, "dataset.val_batch_size0", 1)
    setattr(opts, "dev.device_id", None)

    eval_mode = getattr(opts, "evaluation.detection.mode", None)
    if eval_mode == "single_image":
        num_classes = getattr(opts, "model.detection.n_classes", 81)
        assert num_classes is not None, "model.detection.n_classes must be set"
        # test a single image
        img_f_name = getattr(opts, "evaluation.detection.path", None)
        predict_image(opts, img_f_name, **kwargs)
    elif eval_mode == "image_folder":
        # was misnamed "num_seg_classes" (copy-paste from segmentation eval);
        # it holds the detection class count
        num_classes = getattr(opts, "model.detection.n_classes", 81)
        assert num_classes is not None, "model.detection.n_classes must be set"
        # test all images in a folder
        predict_images_in_folder(opts=opts, **kwargs)
    elif eval_mode == "validation_set":
        # evaluate and compute stats for labeled image dataset
        # This is useful for generating results for validation set and compute quantitative results
        predict_labeled_dataset(opts=opts, **kwargs)
    else:
        logger.error(
            "Supported modes are single_image, image_folder, and validation_set. Got: {}".format(eval_mode)
        )