modelling/src/neuraldb/evaluation/postprocess_baselines.py [37:107]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def postprocess_text(preds, labels):
        # Sequences are decoded with skip_special_tokens=False, so strip the
        # special tokens manually before splitting on the answer delimiter.
        special_tokens = [
            token
            for token in (tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)
            if token is not None
        ]

        def clean(text):
            for token in special_tokens:
                text = text.replace(token, "")
            # Empty answers are mapped to the generator's null-answer token.
            return [
                answer.strip() if answer.strip() else generator.null_answer_special
                for answer in text.strip().split(generator.answer_delimiter)
            ]

        preds = [clean(pred) for pred in preds]
        labels = [clean(label) for label in labels]

        return preds, labels

    def compute_metrics(eval_preds):
        preds, labels, metadata = eval_preds

        if isinstance(preds, tuple):
            preds = preds[0]

        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=False)
        if data_args.ignore_pad_token_for_loss:
            # Replace -100 in the labels as we can't decode them.
            labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=False)

        # Some simple post-processing
        decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
        if data_args.predictions_file is not None:
            with open(data_args.predictions_file, "w") as f:
                for pred, label, meta in zip(decoded_preds, decoded_labels, metadata):
                    f.write(
                        json.dumps(
                            {"prediction": pred, "actual": label, "metadata": meta}
                        )
                        + "\n"
                    )

        # Log a random sample of examples (at most 10, in case the eval set is small).
        sampled_ids = random.sample(
            range(len(decoded_preds)), min(10, len(decoded_preds))
        )
        for idx in sampled_ids:
            logger.info(
                f"Example prediction\n"
                f"Q: {metadata[idx]['question']}\n"
                f"P: {decoded_preds[idx]}\n"
                f"A: {decoded_labels[idx]}\n"
            )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



modelling/src/neuraldb/evaluation/postprocess_spj.py [36:106]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def postprocess_text(preds, labels):
        # Sequences are decoded with skip_special_tokens=False, so strip the
        # special tokens manually before splitting on the answer delimiter.
        special_tokens = [
            token
            for token in (tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)
            if token is not None
        ]

        def clean(text):
            for token in special_tokens:
                text = text.replace(token, "")
            # Empty answers are mapped to the generator's null-answer token.
            return [
                answer.strip() if answer.strip() else generator.null_answer_special
                for answer in text.strip().split(generator.answer_delimiter)
            ]

        preds = [clean(pred) for pred in preds]
        labels = [clean(label) for label in labels]

        return preds, labels

    def compute_metrics(eval_preds):
        preds, labels, metadata = eval_preds

        if isinstance(preds, tuple):
            preds = preds[0]

        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=False)
        if data_args.ignore_pad_token_for_loss:
            # Replace -100 in the labels as we can't decode them.
            labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=False)

        # Some simple post-processing
        decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
        if data_args.predictions_file is not None:
            with open(data_args.predictions_file, "w") as f:
                for pred, label, meta in zip(decoded_preds, decoded_labels, metadata):
                    f.write(
                        json.dumps(
                            {"prediction": pred, "actual": label, "metadata": meta}
                        )
                        + "\n"
                    )

        # Log a random sample of examples (at most 10, in case the eval set is small).
        sampled_ids = random.sample(
            range(len(decoded_preds)), min(10, len(decoded_preds))
        )
        for idx in sampled_ids:
            logger.info(
                f"Example prediction\n"
                f"Q: {metadata[idx]['question']}\n"
                f"P: {decoded_preds[idx]}\n"
                f"A: {decoded_labels[idx]}\n"
            )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
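
The two excerpts above are line-for-line identical, so the shared post-processing
logic is a natural candidate for extraction into a common helper. Below is a
minimal sketch of what that could look like, assuming a hypothetical module
modelling/src/neuraldb/evaluation/postprocess_common.py; the tokenizer and
generator parameters stand in for the objects both scripts already hold in scope.

    # postprocess_common.py (hypothetical module name)
    def build_postprocess_text(tokenizer, generator):
        """Build a postprocess_text(preds, labels) closure bound to the given
        tokenizer and generator, so both evaluation scripts can share it."""
        special_tokens = [
            token
            for token in (tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)
            if token is not None
        ]

        def clean(text):
            # Strip special tokens, then split into individual answers,
            # substituting the null-answer token for empty strings.
            for token in special_tokens:
                text = text.replace(token, "")
            return [
                answer.strip() if answer.strip() else generator.null_answer_special
                for answer in text.strip().split(generator.answer_delimiter)
            ]

        def postprocess_text(preds, labels):
            return [clean(pred) for pred in preds], [clean(label) for label in labels]

        return postprocess_text

Each script could then drop its local definition and bind
postprocess_text = build_postprocess_text(tokenizer, generator) instead.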



