def from_tensors()

in runinferenceutil/infra.py


import logging

from apache_beam.ml.inference.base import PredictionResult
from transformers import PreTrainedTokenizerBase


def from_tensors(
    result: PredictionResult,
    tokenizer: PreTrainedTokenizerBase) -> tuple[str, str]:
    """Decodes input and output token tensors into text.

    Args:
        result: Prediction result from the RunInference transform.
        tokenizer: Tokenizer for the LLM model.

    Returns:
        A tuple of the decoded model output and the decoded input prompt.
    """
    # PredictionResult.example holds the input tokens fed to the model.
    input_tokens = result.example
    decoded_inputs = tokenizer.decode(input_tokens, skip_special_tokens=True)

    # PredictionResult.inference holds the tokens generated by the model.
    decoded_outputs = tokenizer.decode(result.inference, skip_special_tokens=True)
    prompt_and_result = f"Input: {decoded_inputs} \t Output: {decoded_outputs}"
    logging.info('`runinference` : %s', prompt_and_result)
    return (decoded_outputs, decoded_inputs)
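
For context, a minimal usage sketch is shown below: it builds a PredictionResult by hand and decodes it with from_tensors. The "gpt2" tokenizer checkpoint and the example strings are illustrative assumptions, not taken from this repo; in the real pipeline the PredictionResult comes from the RunInference transform (e.g. via beam.Map(from_tensors, tokenizer) on its output PCollection).

# Hypothetical usage sketch; the "gpt2" checkpoint and example strings are
# assumptions for illustration only.
from apache_beam.ml.inference.base import PredictionResult
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # assumed tokenizer

# Token tensors standing in for what RunInference would produce:
# `example` carries the input prompt tokens, `inference` the generated tokens.
prompt_ids = tokenizer("Input prompt", return_tensors="pt").input_ids[0]
output_ids = tokenizer("Model answer", return_tensors="pt").input_ids[0]

result = PredictionResult(example=prompt_ids, inference=output_ids)
decoded_output, decoded_input = from_tensors(result, tokenizer)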