result = result.replace('3', 'e')
for x in string.punctuation:
result = result.replace(x, "")
chunks = [self.stemmer.stem(word) for word in nltk.word_tokenize(result)]
return ' '.join(chunks)
def tokenize(self, text: str):