def test()

in downstream/votenet/lib/ddp_trainer.py
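
Evaluation entry point: runs the model over the test dataloader, accumulates loss/accuracy statistics, computes average precision at each configured IoU threshold, optionally writes benchmark submissions or visualization dumps, and returns the mean loss.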


    def test(self):
        if self.config.test.use_cls_nms:
            assert self.config.test.use_3d_nms, 'use_cls_nms requires use_3d_nms'

        AP_IOU_THRESHOLDS = self.config.test.ap_iou_thresholds
        logging.info(str(datetime.now()))
        # Reset numpy seed.
        # REF: https://github.com/pytorch/pytorch/issues/5059
        np.random.seed(0)
        stat_dict = {}
        # One AP calculator per configured IoU threshold.
        ap_calculator_list = [APCalculator(iou_thresh, self.dataset_config.class2type)
                              for iou_thresh in AP_IOU_THRESHOLDS]
        self.net.eval()  # set model to eval mode (affects batch norm and dropout)
        for batch_idx, batch_data_label in enumerate(self.test_dataloader):
            if batch_idx % 10 == 0:
                print('Eval batch: %d'%(batch_idx))
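            # Move tensors to the GPU; 'scan_name' is a list of strings and is skipped.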
            for key in batch_data_label:
                if key == 'scan_name':
                    continue
                batch_data_label[key] = batch_data_label[key].cuda()
            # Forward pass
            inputs = {'point_clouds': batch_data_label['point_clouds']}
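            # Voxelized inputs are forwarded as well when the dataloader provides them.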
            if 'voxel_coords' in batch_data_label:
                inputs.update({
                    'voxel_coords': batch_data_label['voxel_coords'],
                    'voxel_inds':   batch_data_label['voxel_inds'],
                    'voxel_feats':  batch_data_label['voxel_feats']})
            with torch.no_grad():
                end_points = self.net(inputs)

            # Compute loss
            for key in batch_data_label:
                assert key not in end_points
                end_points[key] = batch_data_label[key]
            # criterion: detection loss function resolved at module scope.
            loss, end_points = criterion(end_points, self.dataset_config)

            # Accumulate statistics and print out
            for key in end_points:
                if 'loss' in key or 'acc' in key or 'ratio' in key:
                    if key not in stat_dict:
                        stat_dict[key] = 0
                    stat_dict[key] += end_points[key].item()

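            # Parse predictions and ground truths into per-class box lists for AP evaluation.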
            batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT_TEST)
            batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT_TEST)
            for ap_calculator in ap_calculator_list:
                ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

            # Optionally write predictions in the benchmark submission format.
            if self.config.test.write_to_benchmark:
                DetectionTrainer.write_to_benchmark(batch_pred_map_cls, batch_data_label['scan_name'])

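            # Optionally dump detections for offline visualization.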
            if self.config.test.save_vis:
                dump_results_(end_points, 'visualization', self.dataset_config)

        # Log statistics
        for key in sorted(stat_dict.keys()):
            logging.info('eval mean %s: %f'%(key, stat_dict[key]/float(batch_idx+1)))

        # Evaluate average precision
        if not self.config.test.write_to_benchmark:
            for i, ap_calculator in enumerate(ap_calculator_list):
                logging.info('-'*10 + 'iou_thresh: %f'%(AP_IOU_THRESHOLDS[i]) + '-'*10)
                metrics_dict = ap_calculator.compute_metrics()
                for key in metrics_dict:
                    logging.info('eval %s: %f'%(key, metrics_dict[key]))

        mean_loss = stat_dict['loss']/float(batch_idx+1)
        return mean_loss
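
A minimal usage sketch (hypothetical driver; the DetectionTrainer constructor and the exact config schema live elsewhere in the repo):

    # Hypothetical: assumes `config` carries the test fields read above
    # (ap_iou_thresholds, use_3d_nms, write_to_benchmark, save_vis, ...).
    trainer = DetectionTrainer(config)
    mean_loss = trainer.test()  # runs the eval loop and logs per-IoU AP metrics
    logging.info('eval mean loss: %f' % mean_loss)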