from collections import Counter

import jieba

# Count the most frequent words in the novel "Romance of the Three Kingdoms".
# Assumes a UTF-8 text file named 三国演义.txt exists in the working directory.

# Single source of truth for the input path, so the open() call and the
# error message can never name different files (the original printed
# "sanguo.txt" while opening "三国演义.txt").
FILENAME = "三国演义.txt"

try:
    with open(FILENAME, "r", encoding="utf-8") as f:
        text = f.read()
except FileNotFoundError:
    print(f"文件 {FILENAME} 未找到，请确保文件存在。")
    exit()

# Tokenize with jieba and keep only tokens longer than one character.
# NOTE: the original also compared against a long list of stop words
# ("，", "。", "的", "是", ...), but every one of them is a single
# character, so the len(word) > 1 test already excludes them all —
# that comparison chain was dead code and is dropped here.
counts = Counter(word for word in jieba.cut(text) if len(word) > 1)

# Print the 20 most frequent words; Counter.most_common already returns
# (word, count) pairs sorted by descending count.
print("词频最高的 20 个词：")
for word, count in counts.most_common(20):
    print(f"{word}: {count}")