tools/eval.py
def main():
args = parse_args()
    assert args.out or args.eval or args.inference_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save / eval / inference-only '
         '/ show the results) with the argument "--out", "--eval", '
         '"--inference-only", "--show" or "--show-dir"')
if args.eval and args.inference_only:
raise ValueError(
            '--eval and --inference-only cannot both be specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
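    # --model_type selects a predefined config template from CONFIG_TEMPLATE_ZOO
    # and overrides any --config that was passed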
if args.model_type is not None:
assert args.model_type in CONFIG_TEMPLATE_ZOO, 'model_type must be in [%s]' % (
', '.join(CONFIG_TEMPLATE_ZOO.keys()))
print('model_type=%s, config file will be replaced by %s' %
(args.model_type, CONFIG_TEMPLATE_ZOO[args.model_type]))
args.config = CONFIG_TEMPLATE_ZOO[args.model_type]
if args.config.startswith('http'):
r = requests.get(args.config)
# download config in current dir
tpath = args.config.split('/')[-1]
while not osp.exists(tpath):
try:
with open(tpath, 'wb') as code:
code.write(r.content)
            except Exception:
pass
args.config = tpath
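    # parse the (possibly downloaded) config file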
cfg = mmcv_config_fromfile(args.config)
if args.user_config_params is not None:
        assert args.model_type is not None, 'model_type must be set'
# rebuild config by user config params
cfg = rebuild_config(cfg, args.user_config_params)
# check oss_config and init oss io
if cfg.get('oss_io_config', None) is not None:
io.access_oss(**cfg.oss_io_config)
# set multi-process settings
setup_multi_processes(cfg)
    # dynamically adapt mmdet models
dynamic_adapt_for_mmlab(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
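    # weights come from the checkpoint loaded below, so drop any pretrained
    # paths left in the config (including a nested rfp_backbone, if present)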
cfg.model.pretrained = None
    if cfg.model.get('neck') and not isinstance(cfg.model.neck, list):
        if cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
# cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
if args.work_dir is not None and rank == 0:
if not io.exists(args.work_dir):
io.makedirs(args.work_dir)
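        # timestamped json file used below to dump the final eval results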
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(args.work_dir, 'eval_{}.json'.format(timestamp))
# build the model and load checkpoint
model = build_model(cfg.model)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f'using device {device}')
checkpoint = load_checkpoint(model, args.checkpoint, map_location=device)
    # re-parameterize RepVGG-style blocks into their deploy form
model = reparameterize_models(model)
model.to(device)
# if args.fuse_conv_bn:
# model = fuse_module(model)
    # old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if 'meta' in checkpoint and 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
elif hasattr(cfg, 'CLASSES'):
model.CLASSES = cfg.CLASSES
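    # wrap the model for single-gpu or distributed inference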
if not distributed:
model = MMDataParallel(model, device_ids=[0])
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
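    # each entry in eval_pipelines defines its own data source, test mode and evaluators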
    assert 'eval_pipelines' in cfg, 'eval_pipelines is needed for testing'
for eval_pipe in cfg.eval_pipelines:
eval_data = eval_pipe.get('data', None) or cfg.data.val
# build the dataloader
if eval_data.get('dali', False):
data_loader = datasets.build_dali_dataset(
eval_data).get_dataloader()
# dali dataloader implements `evaluate` func, so use it as dummy dataset
dataset = data_loader
else:
            # non-dali datasets do not consume imgs_per_gpu, so pop it and pass it to the dataloader
imgs_per_gpu = eval_data.pop('imgs_per_gpu', cfg.data.imgs_per_gpu)
dataset = build_dataset(eval_data)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=imgs_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# oss_config=cfg.get('oss_io_config', None))
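        # run inference; in the distributed case results are collected across gpus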
if not distributed:
outputs = single_gpu_test(
model, data_loader, mode=eval_pipe.mode, use_fp16=args.fp16)
else:
outputs = multi_gpu_test(
model,
data_loader,
mode=eval_pipe.mode,
tmp_dir=args.tmpdir,
gpu_collect=args.gpu_collect,
use_fp16=args.fp16)
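        # only rank 0 dumps and evaluates the collected results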
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
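            # extra kwargs for dataset.evaluate can be passed via --options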
eval_kwargs = {}
if args.options is not None:
eval_kwargs.update(args.options)
if args.inference_only:
                raise NotImplementedError('--inference-only is not supported yet')
if args.eval:
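                # metric_type is a config-only field, not an evaluator argument; drop it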
for t in eval_pipe.evaluators:
if 'metric_type' in t:
t.pop('metric_type')
evaluators = build_evaluator(eval_pipe.evaluators)
                eval_result = dataset.evaluate(
                    outputs, evaluators=evaluators, **eval_kwargs)
                print(f'\neval_result: {eval_result}')
if args.work_dir is not None:
with io.open(log_file, 'w') as f:
json.dump(eval_result, f)