# get_tokenizer() — originally defined in inference/etl.py

def get_tokenizer(data, num_words=None):
    """Fit a Keras ``Tokenizer`` on the given text corpus and return it.

    Args:
        data: Iterable of text strings to fit the tokenizer's vocabulary on.
        num_words: Optional cap on vocabulary size (only the most frequent
            words are kept). Defaults to the module-level ``MAX_NB_WORDS``
            constant, preserving the original hard-coded behavior.

    Returns:
        A fitted ``tensorflow.keras.preprocessing.text.Tokenizer``.
    """
    # Local import keeps TensorFlow's heavy startup cost off the module
    # import path — only paid when a tokenizer is actually built.
    from tensorflow.keras.preprocessing.text import Tokenizer

    if num_words is None:
        # NOTE(review): MAX_NB_WORDS is assumed to be a module-level
        # constant defined elsewhere in inference/etl.py — confirm.
        num_words = MAX_NB_WORDS
    tokenizer = Tokenizer(num_words=num_words)
    tokenizer.fit_on_texts(data)
    return tokenizer