import nltk
# nltk.download('stopwords')
from nltk.corpus import stopwords
# stopwords = stopwords.words('english')
# Reference links:
# https://blog.csdn.net/sc_jizhi/article/details/125032736
# https://www.cnblogs.com/orenoxuan/p/16084969.html

# Read the whole input text. NOTE: the original used file.readline(), which
# only processes the first line of the file; read() takes the full document.
# Explicit UTF-8 since the sample text contains CJK characters.
with open("text1.txt", "r", encoding="utf-8") as file:
    text = file.read()

## In the original test text, "amm", "jast", "booy", "baskerball" are
## deliberately misspelled to exercise spell checking.
# text = 'I amm jast a booy, and (( loved 我baskerball 还a lot. Just a lucky boy喜欢.'

# Sentence segmentation
sentences = nltk.sent_tokenize(text)

# Word tokenization (one token list per sentence)
tokens = [nltk.word_tokenize(sent) for sent in sentences]

# Build the stopword set once. The original called stopwords.words('english')
# for every single token, rebuilding the list and doing an O(n) membership
# test each time; a set gives O(1) lookups and is constructed once.
stop_words = set(stopwords.words('english'))

# Filter out stopwords
filtered_tokens = [[word for word in sent if word not in stop_words] for sent in tokens]

print(filtered_tokens)

# Select feature words. FreqDist requires a flat iterable of hashable tokens;
# the original passed the list-of-lists directly, which raises TypeError
# (lists are unhashable). Flatten across sentences first.
all_words = [word for sent in filtered_tokens for word in sent]
fdist = nltk.FreqDist(all_words)
features = fdist.most_common(50)

# Compute word weights (raw frequency per feature word)
word_weights = dict(features)
print(word_weights)