in models/vision/detection/tools/test.py [0:0]
def main():
    args = parse_args()
    print(args)

    assert args.out or args.json_out, \
        ('Please specify at least one operation to save the results '
         'with the argument "--out" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]
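    # The stripped json_out is used as a filename prefix for the per-type
    # result files written at the end of this function.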
    cfg = Config.fromfile(args.config)
    # all weights come from the checkpoint, so drop the pretrained reference
    cfg.model.pretrained = None

    distributed = False
    # make only the GPU selected via --gpu_id visible to TensorFlow
    gpus = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_visible_devices(gpus[args.gpu_id], 'GPU')
    # build the dataset
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    # dummy data to init network
    img = tf.random.uniform(shape=[1333, 1333, 3], dtype=tf.float32)
    img_meta = tf.constant(
        [465., 640., 3., 800., 1101., 3., 1333., 1333., 3., 1.7204301, 0.],
        dtype=tf.float32)
    _ = model((tf.expand_dims(img, axis=0), tf.expand_dims(img_meta, axis=0)),
              training=False)
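    # The dummy call above forces Keras to create the model's variables so
    # that load_checkpoint() has something to restore into.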
    load_checkpoint(model, args.checkpoint)
    model.CLASSES = dataset.CLASSES
    if not distributed:
        outputs = single_gpu_test(model, dataset)
    else:
        raise NotImplementedError

    rank, _, _, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        fileio.dump(outputs, args.out)
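        # eval_types comes from --eval, e.g. ['bbox'], ['segm'] or
        # ['proposal_fast']; each entry names a COCO evaluation to run.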
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    # dict outputs: evaluate each named result separately
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)
    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
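
# Example invocation (a sketch: assumes parse_args() takes the config and
# checkpoint as positional arguments and that --eval accepts multiple metric
# names; the paths below are placeholders):
#   python tools/test.py <config.py> <checkpoint> --out results.pkl --eval bbox segm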