import pickle
import jieba
from collections import Counter 
from snownlp import SnowNLP


# Load the previously pickled text blob from disk into `line` (a str used by
# the segmentation and sentiment steps below).
# NOTE(review): unpickling data from an untrusted source can execute arbitrary
# code — only feed this file data that this project serialized itself.
with open('content.ekl', 'rb') as f1:
    # pickle.load reads the stream directly; no need for loads(f1.read()).
    line = pickle.load(f1)
    # print(line)
    # print(line)

# 分词 (word segmentation) and frequency statistics.
# Tokenize with jieba, keep tokens longer than one character (drops single
# characters and bare '\r\n' line breaks), and count occurrences.
seg_list = jieba.cut(line)
c = Counter(x for x in seg_list if len(x) > 1 and x != '\r\n')

print('常用词频度统计结果')
# Collect report lines in a list and join once — repeated `aa += ...`
# string concatenation is quadratic in the number of lines.
report_parts = []
for k, v in c.most_common(100):
    print(f'{k}:{v}')
    report_parts.append(f'{k}:{v}\n')
aa = ''.join(report_parts)
with open('词频统计.txt', 'w', encoding='utf-8') as f:
    f.write(aa)



# 情感分析 (sentiment analysis): score each sentence of the text.
# SnowNLP.sentiments returns a probability in [0, 1]; values nearer 1 indicate
# a more positive sentence (per SnowNLP documentation).
print('情感分析统计结果')
s = SnowNLP(line)
# Build the output with a list + join instead of quadratic string `+=`.
sentiment_parts = []
for sentence in s.sentences:
    score = SnowNLP(sentence).sentiments
    sentiment_parts.append(sentence + '(' + str(score) + ')' + '\n')
bb = ''.join(sentiment_parts)
with open('情感分析.txt', 'w', encoding='utf-8') as f:
    f.write(bb)