import jieba
import re
from collections import Counter

def count_word_frequency(file_path):
    """Count word frequencies in a UTF-8 text file.

    The file is read in full, segmented with jieba, and only tokens that
    consist of 2-16 Chinese characters (the CJK range U+4E00-U+9FA5, plus
    the middle dot '·' used in transliterated names) are kept.

    Args:
        file_path: Path to a UTF-8 encoded text file.

    Returns:
        collections.Counter mapping each kept word to its occurrence count.
    """
    # Read the whole file; the context manager guarantees it is closed.
    with open(file_path, 'r', encoding='utf-8') as f:
        text = f.read()

    # BUG FIX: the original pattern was r'^(?:[\u4e00-\u9fa5·]{2,16})+$'.
    # Wrapping the {2,16} run in '(?:...)+' lets ANY token of length >= 2
    # match (repeated 2-16 char runs compose every length), silently
    # defeating the intended 16-character upper bound. fullmatch() also
    # makes the ^...$ anchors unnecessary.
    word_filter = re.compile(r'[\u4e00-\u9fa5·]{2,16}')
    words = [word for word in jieba.lcut(text) if word_filter.fullmatch(word)]

    # Counter tallies occurrences of each distinct word.
    return Counter(words)

def main():
    """Print the word-frequency table for the novel, most frequent first."""
    # Path to the text of "Romance of the Three Kingdoms".
    novel_path = "三国演义.txt"  # replace with the actual file path

    # Tally word frequencies, then emit one "word: count" line per entry,
    # ordered from most to least common.
    counts = count_word_frequency(novel_path)
    lines = (f"{word}: {count}" for word, count in counts.most_common())
    for line in lines:
        print(line)

# Run the frequency report only when executed as a script, not on import.
if __name__ == "__main__":
    main()
