def create_text_preprocessor()

in src/chug/text/tokenization.py
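
A minimal construction sketch (not taken from the library's own documentation): it assumes the function is importable as chug.text.tokenization.create_text_preprocessor and that a tokenizer name string is resolved to a tokenizer internally, as the Union[str, Callable] hint suggests. The model name and special-token values below are illustrative placeholders, not values used by the library.

from chug.text.tokenization import create_text_preprocessor  # assumed import path

preprocess_text = create_text_preprocessor(
    tokenizer="bert-base-uncased",  # illustrative; a tokenizer callable also satisfies Union[str, Callable]
    max_length=512,
    task_start_token="<s_task>",    # hypothetical special token marking the task prompt
    prompt_end_token="<s_task>",    # hypothetical; defaults to None when unused
    ignore_id=-100,                 # standard label id ignored by the loss
)

# The returned callable is then applied per sample; input_key / target_key
# (see the signature below) name the text fields it reads.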


def create_text_preprocessor(
        tokenizer: Union[str, Callable],          # tokenizer name, or an already-built tokenizer callable
        max_length: int = 1024,                   # maximum token sequence length
        task_start_token: Optional[str] = None,   # optional special token marking the start of the task prompt
        prompt_end_token: Optional[str] = None,   # optional special token marking the end of the prompt
        ignore_id: int = -100,                    # label id ignored by the loss (standard -100 ignore index)
        include_target: bool = True,              # whether target text is processed in addition to the input
        return_dict: bool = True,                 # return outputs as a dict
        input_key: str = "text_input",            # sample key for the input text
        target_key: str = "text_target",          # sample key for the target text