def tokenize()

in inference/etl.py [0:0]

Converts a batch of raw text strings into integer word-index sequences using a fitted Keras Tokenizer, then pads or truncates each sequence to MAX_SEQUENCE_LENGTH.

def tokenize(data, tokenizer):
    from tensorflow.keras.preprocessing.sequence import pad_sequences
    # Map each text to a list of integer word indices via the fitted tokenizer.
    data = tokenizer.texts_to_sequences(data)
    # Pad/truncate every sequence to a fixed length; MAX_SEQUENCE_LENGTH is
    # assumed to be defined at module level in etl.py.
    return pad_sequences(data, maxlen=MAX_SEQUENCE_LENGTH)
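For context, here is a minimal end-to-end sketch of the same pipeline. The vocabulary cap, the toy texts, and the value 10 for MAX_SEQUENCE_LENGTH are illustrative assumptions, not values taken from inference/etl.py.

from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

MAX_SEQUENCE_LENGTH = 10  # stand-in for the module-level constant

# Fit a vocabulary on a toy corpus; num_words caps the vocabulary size.
tokenizer = Tokenizer(num_words=20000)
tokenizer.fit_on_texts(["the cat sat on the mat", "the dog barked"])

# Same two steps as tokenize(): texts -> index sequences -> padded matrix.
sequences = tokenizer.texts_to_sequences(["the cat barked"])
padded = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
print(padded.shape)  # (1, 10); shorter sequences are zero-padded on the left

Note that pad_sequences pads and truncates at the start of each sequence by default (padding='pre'), and words never seen during fit_on_texts are silently dropped unless the Tokenizer was constructed with an oov_token.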