def eval_stsdev()

in PairSupCon/training.py [0:0]


    def eval_stsdev(self):
    
        def prepare(params, samples):
            # SentEval hook that runs once before evaluation; no corpus-level
            # preparation is needed here, so it is a no-op.
            return

        def batcher(params, batch):
            # SentEval supplies each batch as a list of tokenized sentences;
            # join the tokens back into plain strings for the tokenizer.
            sentences = [' '.join(s) for s in batch]

            # Tokenize the batch (padding and truncating to params['max_length'])
            # and move the tensors onto the evaluation device.
            features = self.tokenizer.batch_encode_plus(
                sentences,
                max_length=params['max_length'],
                return_tensors='pt',
                padding=True,
                truncation=True
            )
            input_ids = features['input_ids'].to(params['device'])
            attention_mask = features['attention_mask'].to(params['device'])

            with torch.no_grad():
                # Forward pass in evaluation mode to obtain sentence embeddings.
                embeddings = self.model(input_ids=input_ids, attention_mask=attention_mask, task_type="evaluate")
            return embeddings.detach().cpu().numpy()

        # SentEval configuration: a prototyping-style setup (no hidden layer,
        # 5-fold cross-validation), evaluated on the first GPU.
        params_senteval = {'task_path': self.args.path_sts_data, 'usepytorch': True, 'kfold': 5}
        params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 64,
                                         'tenacity': 3, 'epoch_size': 2}
        params_senteval['max_length'] = None  # no explicit cap; truncation falls back to the tokenizer's model limit
        params_senteval['device'] = torch.device("cuda:{}".format(0))

        # Run SentEval on the SICK-Relatedness and STS-Benchmark tasks.
        se = senteval.engine.SE(params_senteval, batcher, prepare)
        transfer_tasks = ['SICKRelatedness', 'STSBenchmark']
        results = se.eval(transfer_tasks)

        # Collect the dev-split Spearman correlations and report their average.
        stsb_spearman = results['STSBenchmark']['dev']['spearman'][0]
        sickr_spearman = results['SICKRelatedness']['dev']['spearman'][0]
        metrics = {
            "eval_stsb_spearman": stsb_spearman,
            "eval_sickr_spearman": sickr_spearman,
            "eval_avg_sts": (stsb_spearman + sickr_spearman) / 2,
        }
        return metrics
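
The method assumes that torch and the SentEval package (senteval) are importable, that the STS data lives under self.args.path_sts_data, and that the surrounding trainer object exposes self.model and self.tokenizer. A minimal usage sketch follows; the Trainer class name and its construction are assumptions for illustration, not part of the snippet above.

    # Hypothetical usage sketch; Trainer and args are assumptions, not from this file.
    import torch
    import senteval  # SentEval (with the STS data downloaded) must be on the path

    args = parse_args()            # hypothetical; must provide path_sts_data
    trainer = Trainer(args)        # must expose .model, .tokenizer, .args
    trainer.model.eval()           # switch to evaluation mode before scoring
    metrics = trainer.eval_stsdev()
    print(metrics["eval_stsb_spearman"],
          metrics["eval_sickr_spearman"],
          metrics["eval_avg_sts"])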