IC/inference.py [82:114]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Evaluation pass: switch off dropout/batch-norm updates and disable
    # autograd bookkeeping for the whole test sweep.
    model.eval()
    with torch.no_grad():
        # One "batch" spans all GPUs: per-GPU batch size times GPU count.
        dev_batch_list = data.get_batches(args.number_of_gpu * args.batch_size_per_gpu, mode='test')
        dev_batch_num_per_epoch = len(dev_batch_list)
        dev_p = progressbar.ProgressBar(dev_batch_num_per_epoch)
        print ('Number of evaluation batches is {}'.format(dev_batch_num_per_epoch))
        dev_p.start()
        # Accumulate decoded predictions and gold reference strings in parallel.
        dev_pred_text_list, dev_reference_text_list = [], []
        for p_dev_idx in range(dev_batch_num_per_epoch):
            dev_p.update(p_dev_idx)
            one_dev_batch = dev_batch_list[p_dev_idx]
            dev_batch_src_tensor, dev_batch_src_mask, dev_batch_input, dev_batch_labels = data.parse_batch_tensor(one_dev_batch)
            if cuda_available:
                dev_batch_src_tensor = dev_batch_src_tensor.to(device)
                dev_batch_src_mask = dev_batch_src_mask.to(device)
                dev_batch_input = dev_batch_input.to(device)
                # NOTE(review): dev_batch_labels is moved to the device but never
                # used below in this span — the transfer looks like dead work.
                dev_batch_labels = dev_batch_labels.to(device)
            # Under DataParallel/DDP the underlying model sits behind .module;
            # call the custom prediction helper on the unwrapped model.
            if multi_gpu_training:
                one_dev_prediction_text_list = model.module.batch_prediction(dev_batch_src_tensor, dev_batch_src_mask)
            else:
                one_dev_prediction_text_list = model.batch_prediction(dev_batch_src_tensor, dev_batch_src_mask)
            dev_pred_text_list += one_dev_prediction_text_list
            # References are decoded from the (target-side) input tensor.
            if multi_gpu_training:
                dev_reference_text_list += model.module.parse_batch_text(dev_batch_input)
            else:
                dev_reference_text_list += model.parse_batch_text(dev_batch_input)
        dev_p.finish()
        # Predictions and references must stay aligned one-to-one.
        assert len(dev_pred_text_list) == len(dev_reference_text_list)
        # Exact-match accuracy: count whitespace-stripped string equality.
        dev_same_num = 0
        for eva_idx in range(len(dev_pred_text_list)):
            if dev_pred_text_list[eva_idx].strip() == dev_reference_text_list[eva_idx].strip():
                dev_same_num += 1
        # NOTE(review): raises ZeroDivisionError if the test split is empty —
        # consider guarding len(dev_pred_text_list) > 0.
        dev_acc = 100 * (dev_same_num / len(dev_pred_text_list))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



IC/learn.py [166:198]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # NOTE(review): this evaluation loop is a byte-for-byte duplicate of the
        # one in IC/inference.py (lines 82-114) — extracting it into a shared
        # evaluate(model, data, args, ...) helper would remove the duplication.
        # Evaluation pass: disable training-mode layers and autograd tracking.
        model.eval()
        with torch.no_grad():
            # Effective batch size is per-GPU batch size scaled by GPU count.
            dev_batch_list = data.get_batches(args.number_of_gpu * args.batch_size_per_gpu, mode='test')
            dev_batch_num_per_epoch = len(dev_batch_list)
            dev_p = progressbar.ProgressBar(dev_batch_num_per_epoch)
            print ('Number of evaluation batches is {}'.format(dev_batch_num_per_epoch))
            dev_p.start()
            # Parallel accumulators for decoded predictions and references.
            dev_pred_text_list, dev_reference_text_list = [], []
            for p_dev_idx in range(dev_batch_num_per_epoch):
                dev_p.update(p_dev_idx)
                one_dev_batch = dev_batch_list[p_dev_idx]
                dev_batch_src_tensor, dev_batch_src_mask, dev_batch_input, dev_batch_labels = data.parse_batch_tensor(one_dev_batch)
                if cuda_available:
                    dev_batch_src_tensor = dev_batch_src_tensor.to(device)
                    dev_batch_src_mask = dev_batch_src_mask.to(device)
                    dev_batch_input = dev_batch_input.to(device)
                    # NOTE(review): labels are transferred to the device but not
                    # used anywhere in this span — likely an unnecessary copy.
                    dev_batch_labels = dev_batch_labels.to(device)
                # With multi-GPU wrappers (DataParallel), custom methods live on
                # model.module rather than on the wrapper itself.
                if multi_gpu_training:
                    one_dev_prediction_text_list = model.module.batch_prediction(dev_batch_src_tensor, dev_batch_src_mask)
                else:
                    one_dev_prediction_text_list = model.batch_prediction(dev_batch_src_tensor, dev_batch_src_mask)
                dev_pred_text_list += one_dev_prediction_text_list
                # Decode gold references from the target-side input tensor.
                if multi_gpu_training:
                    dev_reference_text_list += model.module.parse_batch_text(dev_batch_input)
                else:
                    dev_reference_text_list += model.parse_batch_text(dev_batch_input)
            dev_p.finish()
            # Sanity check: the two lists must be index-aligned.
            assert len(dev_pred_text_list) == len(dev_reference_text_list)
            # Exact-match accuracy over whitespace-stripped strings.
            dev_same_num = 0
            for eva_idx in range(len(dev_pred_text_list)):
                if dev_pred_text_list[eva_idx].strip() == dev_reference_text_list[eva_idx].strip():
                    dev_same_num += 1
            # NOTE(review): division by zero if no test batches were produced —
            # worth guarding before computing accuracy.
            dev_acc = 100 * (dev_same_num / len(dev_pred_text_list))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



