# Read the "test.txt" source file, segment it with the jieba library, and
# write the five most frequent words and their counts to "output.txt".
from collections import Counter

import jieba

txt =open("D:\\test.txt","rt",encoding='utf-8')
txt1=open("D:\\output.txt","w")
txts=txt.read()
words=jieba.lcut(txts)
counts={}
for word in words:
    if len(word)==1:
        continue
    else:
        counts[word]=counts.get(word,0)+1
items=list(counts.items())
items.sort(key=lambda x:x[-1],reverse=True)
for i in range(5):
    word,counts=items[i]
    txt1.writelines(str(items[i]))
    print("{0:<10}{1:>5}".format(word,counts))
txt.close()
txt1.close()