evaluate_one_epoch() — excerpt from downstream/votenet_det_new/lib/train.py


def evaluate_one_epoch(net, train_dataloader, test_dataloader, config, epoch_cnt, CONFIG_DICT, writer):
    """Run one evaluation pass over ``test_dataloader``.

    Moves each batch to the GPU, runs the network under ``torch.no_grad()``,
    computes the loss, accumulates scalar statistics (any ``end_points`` key
    containing 'loss', 'acc' or 'ratio'), feeds predictions/ground truths to
    an AP calculator, and logs everything to ``writer``.

    Args:
        net: model to evaluate; switched to eval mode here.
        train_dataloader: used only to derive the TensorBoard global step.
        test_dataloader: dataloader iterated for evaluation.
        config: experiment config; reads ``config.data.dump_results`` and
            ``config.data.batch_size``.
        epoch_cnt: current epoch index (0-based).
        CONFIG_DICT: evaluation config; must contain ``'dataset_config'``.
        writer: TensorBoard ``SummaryWriter``-like object.

    Returns:
        float: mean loss over the evaluated batches, or 0.0 if the
        dataloader yielded no batches.
    """
    stat_dict = {}  # running sums of scalar statistics, keyed by end_points name
    ap_calculator = APCalculator(ap_iou_thresh=0.5,
                                 class2type_map=CONFIG_DICT['dataset_config'].class2type)
    net.eval()  # set model to eval mode (for bn and dp)

    num_batches = 0  # counted explicitly so an empty loader cannot leave batch_idx unbound
    for batch_idx, batch_data_label in enumerate(test_dataloader):
        num_batches += 1
        if batch_idx % 10 == 0:
            logging.info('Eval batch: %d'%(batch_idx))
        # NOTE(review): assumes every value in the batch dict is a tensor — confirm
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].cuda()

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        if 'voxel_coords' in batch_data_label:
            inputs.update({
                'voxel_coords': batch_data_label['voxel_coords'],
                'voxel_inds':   batch_data_label['voxel_inds'],
                'voxel_feats':  batch_data_label['voxel_feats']})

        with torch.no_grad():
            end_points = net(inputs)

        # Compute loss. NOTE(review): `criterion` is not defined in this
        # function's scope — presumably a module-level global; confirm.
        for key in batch_data_label:
            assert(key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, CONFIG_DICT['dataset_config'])

        # Accumulate scalar statistics for later averaging
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization (first batch, every 10th epoch)
        if config.data.dump_results and batch_idx == 0 and epoch_cnt % 10 == 0:
            dump_results(end_points, 'results', CONFIG_DICT['dataset_config'])

    # Guard: the previous version raised NameError here (batch_idx unbound)
    # and KeyError on stat_dict['loss'] when the dataloader was empty.
    if num_batches == 0:
        logging.warning('evaluate_one_epoch: test_dataloader yielded no batches')
        return 0.0

    # Log statistics; one shared global step for all validation scalars.
    global_step = (epoch_cnt + 1) * len(train_dataloader) * config.data.batch_size
    for key in sorted(stat_dict.keys()):
        mean_val = stat_dict[key] / float(num_batches)
        writer.add_scalar('validation/{}'.format(key), mean_val, global_step)
        logging.info('eval mean %s: %f'%(key, mean_val))

    # Evaluate average precision
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        logging.info('eval %s: %f'%(key, metrics_dict[key]))
    writer.add_scalar('validation/mAP@0.5', metrics_dict['mAP'], global_step)

    mean_loss = stat_dict['loss'] / float(num_batches)
    return mean_loss