def tokenify()

in spamfilter.py


    def tokenify(self, text: str) -> list[str]:
        """Strip punctuation and return only meaningful words (i.e. not stopwords)."""
        # Lowercase the text and drop punctuation characters in a single pass.
        # (Iterates over characters, so name the loop variable accordingly.)
        no_punct = "".join(
            char.lower() for char in text if char not in self.punctuation
        )
        # Split the cleaned text into word tokens (requires `import nltk`).
        words = nltk.tokenize.word_tokenize(no_punct)
        # Keep only tokens that are not stopwords.
        tokens = [word for word in words if word not in self.stopwords]
        return tokens
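
For context, here is a minimal sketch of how tokenify might be exercised. The excerpt does not show how self.punctuation and self.stopwords are initialized, so the SpamFilter stub below is an assumption: it populates them from string.punctuation and NLTK's English stopword list, a common choice for this kind of filter.

    import string

    import nltk
    from nltk.corpus import stopwords

    # One-time NLTK data downloads (tokenizer model and stopword list):
    # nltk.download("punkt")
    # nltk.download("stopwords")


    class SpamFilter:
        """Hypothetical stub; only the attributes tokenify relies on."""

        def __init__(self):
            # Assumed initialization -- the real spamfilter.py may differ.
            self.punctuation = set(string.punctuation)
            self.stopwords = set(stopwords.words("english"))

        def tokenify(self, text: str) -> list[str]:
            # Lowercase and strip punctuation, then tokenize and drop stopwords.
            no_punct = "".join(
                char.lower() for char in text if char not in self.punctuation
            )
            words = nltk.tokenize.word_tokenize(no_punct)
            return [word for word in words if word not in self.stopwords]


    print(SpamFilter().tokenify("Congratulations!!! You have WON a free prize."))
    # ['congratulations', 'free', 'prize']

Note that stripping punctuation before tokenizing also mangles contractions ("won't" becomes "wont"), which may or may not matter for spam classification.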