import jieba
from collections import Counter

# Read the full text of "Romance of the Three Kingdoms".
# NOTE: raw string (r'...') is required — the original '...\App\...' relied on
# '\A' not being a recognized escape; Python 3.12+ raises a SyntaxWarning for
# such invalid escape sequences (a future SyntaxError). The path value itself
# is unchanged.
with open(r'D:\App\三国演义.txt', 'r', encoding='utf-8') as f:
    text = f.read()

# Tokenize the text with jieba (default cut mode), returning a list of tokens.
words = jieba.lcut(text)

# Keep only multi-character tokens: this drops single characters (mostly
# particles and punctuation) and whitespace-only tokens such as '\r\n'.
# Adjust the length threshold as needed.
filtered_words = [word for word in words if len(word) > 1 and not word.isspace()]

# Count occurrences of each remaining token.
word_counts = Counter(filtered_words)

# Print the 50 most frequent words, one per line, formatted as "word: count".
for word, count in word_counts.most_common(50):
    print(f"{word}: {count}")
