import jieba
from collections import Counter

# Path to the novel's text file. A raw string (r'...') is used so the Windows
# backslashes are taken literally — the original non-raw string only worked
# because '\P', '\p', etc. happen not to be escape sequences.
FILE_PATH = r'D:\Python project\pythonProject\第八次Python作业\三国演义.txt'


def load_text(path):
    """Read and return the full contents of a UTF-8 encoded text file.

    Raises OSError (e.g. FileNotFoundError) if the file cannot be opened.
    """
    with open(path, 'r', encoding='utf-8') as file:
        return file.read()


def count_words(text):
    """Segment *text* with jieba in precise mode and return a Counter.

    Every token jieba emits is counted, including punctuation and
    whitespace tokens — matching the original script's behavior.
    """
    return Counter(jieba.cut(text, cut_all=False))


def main():
    """Print each token and its frequency, sorted from most to least common."""
    word_counts = count_words(load_text(FILE_PATH))
    for word, count in word_counts.most_common():
        print(f"{word}: {count}")


if __name__ == '__main__':
    main()