# Analyze the scraped text in hupu_post_reply.txt: strip crawler noise via a
# stopword set, extract keywords from the post replies with jieba word
# segmentation, rank them by frequency, and write the top 20 to
# most_popular_reply.txt (one "word  count" record per line).

from collections import Counter

import jieba

IN_PATH = "hupu_post_reply.txt"
OUT_PATH = "most_popular_reply.txt"

# Read the whole scraped corpus; close the handle promptly via `with`.
with open(IN_PATH, 'r', encoding='utf-8') as src:
    book = src.read()

# Segment into words and count, skipping single-character tokens
# (mostly punctuation and function characters in Chinese text).
words = jieba.lcut(book)
counts = Counter(word for word in words if len(word) > 1)

# Crawler/boilerplate noise and uninformative tokens to exclude from the
# ranking (set literal deduplicates the repeated entries automatically).
ex = {'一个', '什么', '这个', '这样', '这些', '这种', '已经', '如果', '的话','发表'
      ,'就是','不是','没有','19','15','16','03','可以','还是','21','11','10','17'
      ,'20','第一','球队','40','球员','现在','18','14','13','数据','自己','58','37'
      ,'48','04','27','35','26','22','30','53','55','41','44','34','31','02','2019'
      ,'02','51','57','46','45','51','52','54','43','怎么','这么','内容','这么'
      ,'真的','49','42','01','56','真的','但是','知道','可能','24','29','47','球迷'
      ,'觉得','引用','25','00','05','23','39','不能'}

for word in ex:
    # pop with a default never raises, unlike `del`, when a stopword
    # happens not to appear in this particular corpus.
    counts.pop(word, None)

# most_common(20) safely returns fewer entries when <20 words remain,
# avoiding the IndexError a fixed range(20) loop would hit.
with open(OUT_PATH, 'w', encoding='utf-8') as out:
    for word, count in counts.most_common(20):
        # Newline per record so the report is one word per line.
        out.write("{:<10}{:>5}\n".format(word, count))