import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords

# Count word frequencies in body.txt, excluding English stopwords, and
# write the 100 most common (word, count) pairs to ex4.txt.

with open("body.txt", "r", encoding="utf-8") as f:
    data = f.read()
print("open Done!")

words = word_tokenize(data)
print("word Done!")

# Build the stopword set once, outside the comprehension:
# - the corpus fileid is lowercase "english" — 'English' fails with a
#   lookup error on case-sensitive filesystems (e.g. Linux);
# - a set gives O(1) membership tests instead of rescanning a freshly
#   rebuilt list for every token.
# NOTE(review): matching is case-sensitive, so capitalized stopwords
# ("The", "And") are kept — compare word.lower() if filtering should be
# case-insensitive; left as-is to preserve current output.
stop_words = set(stopwords.words("english"))
useful_words = [word for word in words if word not in stop_words]
print("useword done")

frequency = nltk.FreqDist(useful_words)
print("frequency done")

print(frequency.most_common(100))

with open("ex4.txt", "w", encoding="utf-8") as f:
    f.write(str(frequency.most_common(100)))
    f.write("\n")