# eval() — validation routine, defined in FasterRCNNDetection/train.py

def eval(dataloader, faster_rcnn, trainer, dataset, global_step, test_num=10000):
    """
    Evaluates a trained detector on a data set.

    Args:
        dataloader: iterable yielding (imgs, sizes, gt_bboxes, gt_labels,
            gt_difficults, image_ids) batches; the sizes-unpacking below
            assumes batch size 1.
        faster_rcnn: detector exposing ``predict(imgs, sizes)`` that returns
            per-image bboxes, labels and scores.
        trainer: unused here; kept for call-site compatibility.
        dataset: provides ``get_class_names()`` for logging/plotting.
        global_step: step index used when logging to the summary writer.
        test_num: maximum number of batches to evaluate.

    Returns:
        The dict produced by ``eval_detection_voc`` (includes 'ap', 'map'
        and 'prec@recall').
    """

    with torch.no_grad():

        print('Running validation')
        # Each predicted box is organized as `(y_min, x_min, y_max, x_max)`,
        # where y corresponds to the height and x to the width.
        pred_bboxes, pred_labels, pred_scores = list(), list(), list()
        gt_bboxes, gt_labels, gt_difficults = list(), list(), list()
        image_ids = list()

        for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_, image_ids_) in tqdm(
                                                         enumerate(dataloader), total=test_num):
            sizes = [sizes[0].detach().numpy().tolist()[0],  sizes[1].detach().numpy().tolist()[0]]
            pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(imgs, [sizes])

            # We have to add .copy() here to allow for the loaded image to be
            # released after each iteration.
            if len(gt_bboxes_) > 0:

                # The original loader creates numpy arrays.
                if hasattr(gt_bboxes_, 'numpy'):
                    gt_bboxes += list(gt_bboxes_.numpy().copy())

                # To support empty images, we have to switch to lists and hence
                # we also need to support these here.
                else:
                    gt_bboxes.append([[j.item() for j in i] for i in gt_bboxes_])
            else:
                # BUGFIX: the original `gt_bboxes += []` was a no-op, so an
                # image with no ground truth added entries to gt_labels /
                # gt_difficults below but nothing to gt_bboxes, desynchronizing
                # the per-image lists passed to eval_detection_voc. Append an
                # empty (0, 4) box array to keep them aligned.
                gt_bboxes.append(np.zeros((0, 4), dtype=np.float32))
            gt_labels += list(gt_labels_.numpy().copy())
            gt_difficults += list(gt_difficults_.numpy().copy())
            image_ids += list(image_ids_.numpy().copy())
            pred_bboxes += [pp.copy() for pp in pred_bboxes_]
            pred_labels += [pp.copy() for pp in pred_labels_]
            pred_scores += [pp.copy() for pp in pred_scores_]
            # BUGFIX: the original `if ii == test_num: break` ran test_num + 1
            # batches because ii is 0-based; stop after exactly test_num.
            if ii + 1 >= test_num:
                break

        result = eval_detection_voc(
            pred_bboxes, pred_labels, pred_scores,
            gt_bboxes, gt_labels, gt_difficults,
            use_07_metric=True)

        if opt.validate_only:
            # Validation-only runs dump the raw detections next to the
            # loaded checkpoint instead of logging to TensorBoard.
            save_path = '{}_detections.npz'.format(opt.load_path)
            np.savez(save_path, pred_bboxes=pred_bboxes, 
                                pred_labels=pred_labels,
                                pred_scores=pred_scores,
                                gt_bboxes=gt_bboxes, 
                                gt_labels=gt_labels, 
                                gt_difficults=gt_difficults,
                                image_ids=image_ids,
                                result=result)
        else:
            # Log per-class AP, mAP and precision@recall to the writer.
            classwise_ap = dict()
            for cname, ap in zip(dataset.get_class_names(), result['ap']):
                classwise_ap[cname] = ap
            writer.add_scalars('validation/classwise_ap', classwise_ap, global_step)
            writer.add_scalar('validation/mAP', result['map'], global_step)
            writer.add_scalars('validation/prec@recall', result['prec@recall'], global_step)

            # Plot the last evaluated image with its ground-truth boxes ...
            ori_img_ = inverse_normalize(at.tonumpy(imgs[0]))
            gt_img = visdom_bbox(ori_img_,
                                 at.tonumpy(gt_bboxes[-1]),
                                 at.tonumpy(gt_labels[-1]),
                                 label_names=dataset.get_class_names()+['BG'])
            writer.add_image('test_gt_img', gt_img, global_step)

            # ... and with its predicted boxes.
            pred_img = visdom_bbox(ori_img_,
                                   at.tonumpy(pred_bboxes[-1]),
                                   at.tonumpy(pred_labels[-1]).reshape(-1),
                                   at.tonumpy(pred_scores[-1]),
                                   label_names=dataset.get_class_names()+['BG'])
            writer.add_image('test_pred_img', pred_img, global_step)


        # Drop the large per-batch references before returning so CUDA memory
        # can actually be reclaimed by empty_cache().
        del imgs, gt_bboxes_, gt_labels_, gt_difficults_, image_ids_, pred_bboxes_, pred_labels_, pred_scores_
        torch.cuda.empty_cache()
        return result