# -*- coding: utf-8 -*-
import re
import pandas as pd
import jieba.analyse
from snownlp import SnowNLP
import matplotlib.pyplot as plt
# Read the scraped Weibo comment dataset.
datas = pd.read_csv('/data/dataset/weibo/6a.csv', encoding='utf-8')
print(datas.head(5))

# --- Tokenize the comments of the hot topic ---
# Load the stop-word list so jieba's TF-IDF keyword extractor ignores filler words.
jieba.analyse.set_stop_words('/data/dataset/weibo/chineseStopWords.txt')

# BUG FIX: the original pattern contained an unescaped single quote
# ('[0-9’!'#...), which closed the string literal early, made the rest of
# the line a comment, and left re.sub( unclosed -> SyntaxError.
# The class strips ASCII digits/punctuation plus common CJK punctuation.
# Compiled once here instead of recompiling inside the loop.
_PUNCT_RE = re.compile(
    r'[0-9’!"#$%&\'()*+,-./:;<=>?@，。～?★、…【】《》？“”‘’！\[\\\]^_`{|}~\s]+'
)

# For each comment: strip noise characters, then keep its top-5 keywords.
lines = []
for comment in datas['评论内容']:
    # str() guards against NaN cells, which pandas yields as floats.
    cleaned = _PUNCT_RE.sub('', str(comment))
    # Top-5 TF-IDF keywords per comment (stop words already filtered out).
    lines.append(jieba.analyse.extract_tags(cleaned, topK=5))
# --- Sentiment analysis ---
# Score each non-empty keyword list with SnowNLP: sentiments is a probability
# in [0, 1], where values near 1 read as positive and near 0 as negative.
# NOTE: the original bound this list to the name `list`, shadowing the builtin.
sentiments = []
for keywords in lines:
    if keywords:
        sentiments.append(SnowNLP(str(keywords)).sentiments)

# Shift scores from [0, 1] to [-0.5, 0.5] so that 0 marks the neutral point.
# (Downstream plotting reads `result`, so the name is kept.)
result = [score - 0.5 for score in sentiments]

# With 0.5 as the threshold: strictly above counts as positive (n),
# everything else as negative (m). Downstream pie chart reads `m` and `n`.
n = sum(1 for score in sentiments if score > 0.5)  # positive count
m = len(sentiments) - n                            # negative count (score <= 0.5)

# Convert the counts into fractions of all scored comments.
# Guard against an empty dataset, which previously raised ZeroDivisionError.
if sentiments:
    m = m / len(sentiments)
    n = n / len(sentiments)
# --- Share of positive vs. negative sentiment (pie chart) ---
plt.figure(1)
slice_labels = 'negative', 'positive'
slice_sizes = [m, n]
slice_colors = ['red', 'lightskyblue']
plt.pie(
    slice_sizes,
    explode=(0, 0),
    labels=slice_labels,
    colors=slice_colors,
    autopct='%1.1f%%',
    shadow=True,
    startangle=90,
)
# Equal aspect ratio keeps the pie circular.
plt.axis('equal')
plt.title('Service - related microblogging emotional analysis')
plt.show()

# --- Overall distribution of the shifted sentiment scores (line plot) ---
# BUG FIX: the original called pd.np.arange (the pandas.np alias was removed
# in pandas 2.0 -> AttributeError) with a hard-coded length of 1794, which
# raises ValueError whenever len(result) != 1794. Derive the x axis from the
# data itself instead.
plt.plot(range(len(result)), result, 'k-')
plt.xlabel('Number')
plt.ylabel('Sentiment')
plt.title('Analysis of Sentiments')
plt.show()