def to_tensors()

in runinferenceutil/infra.py


import torch
from transformers import PreTrainedTokenizerBase


def to_tensors(input_text: str, tokenizer: PreTrainedTokenizerBase) -> torch.Tensor:
    """Encodes input text into token tensors.

    Args:
        input_text: Input text for the LLM model.
        tokenizer: Tokenizer for the LLM model.

    Returns:
        Tokenized input tokens.
    """
    # Tokenize the text, pad/truncate to a fixed length of 100 tokens, and
    # return only the input_ids tensor for this single encoded example.
    return tokenizer.encode_plus(text=input_text,
                                 max_length=100,
                                 add_special_tokens=True,
                                 padding='max_length',
                                 return_attention_mask=True,
                                 return_token_type_ids=False,
                                 return_tensors="pt").input_ids[0]