# eval() — extracted from src/SealDetectionRCNN/train.py

def eval(dataloader, faster_rcnn, trainer, dataset, test_num=10000):
    """Run validation and compute VOC-style detection metrics.

    NOTE(review): the name shadows the ``eval`` builtin; kept as-is because
    callers elsewhere reference it by this name.

    Args:
        dataloader: iterable yielding ``(imgs, sizes, gt_bboxes, gt_labels,
            gt_difficults, image_ids)`` batches. Batch size 1 is assumed:
            only element ``[0]`` of each size tensor / image is used.
        faster_rcnn: detector exposing ``predict(imgs, sizes)`` returning
            per-image lists of boxes, labels and scores.
        trainer: holds the visdom visualizer (``trainer.vis``); used only
            when ``opt.validate_only`` is False.
        dataset: exposes ``get_class_names()`` for the visualization legend.
        test_num: maximum number of batches to evaluate.

    Returns:
        The dict produced by ``eval_detection_voc`` (mAP etc., computed with
        the PASCAL VOC 2007 11-point metric).
    """
    with torch.no_grad():
        print('Running validation')
        # Each predicted box is organized as (y_min, x_min, y_max, x_max),
        # where y corresponds to the height and x to the width.
        pred_bboxes, pred_labels, pred_scores = list(), list(), list()
        gt_bboxes, gt_labels, gt_difficults = list(), list(), list()
        image_ids = list()
        for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_, image_ids_) in tqdm(
                                                         enumerate(dataloader), total=test_num):
            # sizes arrives as a pair of 1-element tensors; unpack to [H, W].
            sizes = [sizes[0].detach().numpy().tolist()[0],  sizes[1].detach().numpy().tolist()[0]]
            pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(imgs, [sizes])
            # We have to add .copy() here to allow for the loaded image to be released after each iteration
            gt_bboxes += list(gt_bboxes_.numpy().copy())
            gt_labels += list(gt_labels_.numpy().copy())
            gt_difficults += list(gt_difficults_.numpy().copy())
            image_ids += list(image_ids_.numpy().copy())
            pred_bboxes += [pp.copy() for pp in pred_bboxes_]
            pred_labels += [pp.copy() for pp in pred_labels_]
            pred_scores += [pp.copy() for pp in pred_scores_]
            # ii is 0-based, so stop once test_num batches have been processed.
            # (The previous `ii == test_num` check ran test_num + 1 batches.)
            if ii + 1 >= test_num:
                break

        result = eval_detection_voc(
            pred_bboxes, pred_labels, pred_scores,
            gt_bboxes, gt_labels, gt_difficults,
            use_07_metric=True)

        if opt.validate_only:
            # Persist raw detections alongside the checkpoint for offline analysis.
            save_path = '{}_detections.npz'.format(opt.load_path)
            np.savez(save_path, pred_bboxes=pred_bboxes,
                                pred_labels=pred_labels,
                                pred_scores=pred_scores,
                                gt_bboxes=gt_bboxes,
                                gt_labels=gt_labels,
                                gt_difficults=gt_difficults,
                                image_ids=image_ids,
                                result=result)
        else:
            # Visualize the last batch's ground truth vs. predictions in visdom.
            # NOTE(review): assumes the loop ran at least once — an empty
            # dataloader would leave `imgs` undefined here.
            ori_img_ = inverse_normalize(at.tonumpy(imgs[0]))
            gt_img = visdom_bbox(ori_img_,
                                 at.tonumpy(gt_bboxes[-1]),
                                 at.tonumpy(gt_labels[-1]),
                                 label_names=dataset.get_class_names()+['BG'])
            trainer.vis.img('test_gt_img', gt_img)

            # plot predicted bboxes
            pred_img = visdom_bbox(ori_img_,
                                   at.tonumpy(pred_bboxes[-1]),
                                   at.tonumpy(pred_labels[-1]).reshape(-1),
                                   at.tonumpy(pred_scores[-1]),
                                   label_names=dataset.get_class_names()+['BG'])
            trainer.vis.img('test_pred_img', pred_img)

        # Drop references to the last batch before freeing cached GPU memory.
        del imgs, gt_bboxes_, gt_labels_, gt_difficults_, image_ids_, pred_bboxes_, pred_labels_, pred_scores_
        torch.cuda.empty_cache()
        return result