def encode_text()

in src/tokenizer.py


    def encode_text(self, input_sentence, add_eos=False, add_sos=False):
        """Encode an utterance into token IDs.

        Sentences containing special tokens are encoded token by token so
        that special tokens map directly to their reserved IDs; all other
        sentences go through the regular encode() path.
        """
        output_ids = []
        input_toks = input_sentence.split()
        if any(tok in self.special_tokens for tok in input_toks):
            for tok in input_toks:
                if tok in self.special_tokens:
                    # Special tokens bypass subword encoding.
                    output_ids.append(self.special_tokens[tok])
                else:
                    # Ordinary tokens in a mixed sentence are encoded one at a
                    # time (assumes encode() accepts a single token string).
                    output_ids.extend(self.encode(tok))
        else:
            output_ids = self.encode(input_sentence, add_eos=add_eos, add_sos=add_sos)

        return output_ids
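
A minimal usage sketch of both branches, assuming `tokenizer` is an instance of the surrounding class and that its `special_tokens` mapping contains entries such as `"<sos>"` and `"<eos>"` (the token names and IDs here are illustrative, not taken from the source):

    # Regular sentence: no special tokens present, so the call delegates
    # to self.encode() with the add_eos/add_sos flags passed through.
    ids = tokenizer.encode_text("hello world", add_eos=True)

    # Sentence containing special tokens: each special token is mapped
    # directly to its reserved ID instead of being subword-encoded.
    ids = tokenizer.encode_text("<sos> hello <eos>")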