# Analyze the reply text scraped into hupu_post_reply.txt: strip useless text
# left over from crawling, extract keywords from the replies, sort them by
# frequency of occurrence, and write the result to most_popular_reply.txt.
import jieba
# Input: raw reply text scraped from Hupu (UTF-8). Output: the 20 most
# frequent meaningful words, one "word count" pair per line.
SOURCE_FILE = "hupu_post_reply.txt"
OUTPUT_FILE = "most_popular_reply.txt"

# Stop words excluded from the ranking: common Chinese function words plus
# HTML/CSS/crawler artifacts (tags, attributes, numeric fragments) left
# behind by the scraper.
ex = {'一个', '什么', '这个', '这样', '这些', '这种', '已经', '如果', '的话','data','https','div','target','oss','brank','href'
      ,'class','hoopchina','blank','com','hupu','font','br','bbs','style','html','span','cn','small','img'
      ,'gif','image','03','800','12','src','process','resize','format','webp','imgid','0px','family','quote'
      ,'content','players','color','发自','13','mso','f999','teams','blogfile','mobile','thread','999','客户端',
      'jpg','宋体','hupuapp','201912','size','padding','2019','fareast','theme','minor','Android','Calibri'
      ,'比赛','没有','51','i2','1080','bbsvia9','300','nba','margin','i1','i3','width','text'
      ,'就是','赛季','iPhone','球队','ascii','FirstBbsImg','hansi','bbsvia10','BbsImg','球员','height'
      ,'png','url','02','数据','icon','sans','lazy','latin','01','400','得分','但是','不是','http','serif'
      ,'球迷','script','arial','align','background','images','可以','EN','US','自己','历史','最佳','现在','第一'
      ,'防守','lang','images','可以','14','650','cdth','rgb','face','b1','E5%','10','E4%','line','我们','web'
      ,'sns','placeholder','original','justify','11','还是','30','amp','表现','touch','720','18','tag','break'
      ,'word','yahei','20','179451069362940','em','觉得','能力','为什么','知道','场均','normal','216549819843043'
      ,'他们','3306'}

# Read the scraped text; the context manager guarantees the file is closed.
with open(SOURCE_FILE, 'r', encoding='utf-8') as src:
    book = src.read()

# Count every segmented word longer than one character, skipping stop words
# up front. (The original deleted stop words from the dict afterwards, which
# raised KeyError for any stop word that never occurred in the text.)
counts = {}
for word in jieba.lcut(book):
    if len(word) > 1 and word not in ex:
        counts[word] = counts.get(word, 0) + 1

# Rank by descending frequency.
items = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)

# Write the top 20 entries (or fewer if the text is short — slicing avoids
# the IndexError the original range(20) loop hit on small inputs). A newline
# terminates each entry; the original omitted it, fusing all output into one
# line. Explicit UTF-8 keeps Chinese output portable across locales.
with open(OUTPUT_FILE, 'w', encoding='utf-8') as out:
    for word, count in items[:20]:
        out.write("{:<10}{:>5}\n".format(word, count))
