"""Tokenize a Chinese text with jieba and print the 15 most frequent
personal names (word-frequency analysis of "Romance of the Three Kingdoms").

Reads threekingdoms.txt (UTF-8), merges alternative titles/courtesy names
into one canonical name per person, filters out frequent non-name words,
and prints the top 15 names with their counts.
"""
import jieba

# High-frequency words that jieba emits but that are not personal names.
EXCLUDES = {"将军", "却说", "荆州", "二人", "不可", "不能", "如此"}

# Alias table: alternative titles / courtesy names -> canonical name.
ALIASES = {
    "诸葛亮": "孔明", "孔明曰": "孔明",
    "关公": "关羽", "云长": "关羽",
    "玄德": "刘备", "玄德曰": "刘备",
    "孟德": "曹操", "丞相": "曹操",
}

# Read the novel; `with` guarantees the file handle is closed.
with open("threekingdoms.txt", "r", encoding="utf-8") as f:
    txt = f.read()
words = jieba.lcut(txt)

# Single counting pass (the original counted every word twice: once raw,
# once alias-merged, inflating all totals).  Single-character tokens are
# skipped — they are mostly particles, not names.
counts = {}
for word in words:
    if len(word) == 1:
        continue
    canonical = ALIASES.get(word, word)
    counts[canonical] = counts.get(canonical, 0) + 1

# Remove non-name words AFTER counting so they cannot be re-added.
# pop(..., None) avoids a KeyError when a word never occurred in the text.
for word in EXCLUDES:
    counts.pop(word, None)

# Sort by frequency (descending) and print the top 15 names.
items = sorted(counts.items(), key=lambda x: x[1], reverse=True)
for word, count in items[:15]:
    print("{0:<10}{1:>5}".format(word, count))