import matplotlib.pyplot as plt
import pandas as pd
import jieba
import jieba.posseg as psg
import re
from wordcloud import WordCloud
import imageio

# Load the dataset; drop rows whose tokenized-review column ('评价分词') is missing
data = pd.read_excel('emotion_data.xlsx')
data = data.dropna(subset=['评价分词'])

# Configure matplotlib to render Chinese glyphs (SimHei is a common CJK font;
# swap for any other installed font if unavailable)
plt.rc('font', family='SimHei', size=14)

# Pie chart of sentiment share: label 1 = positive ('积极'), 0 = negative ('消极')
sentiment_counts = data['情感标签'].value_counts()
pie_labels = ['积极评论', '消极评论']
pie_sizes = [sentiment_counts[1], sentiment_counts[0]]

plt.figure(figsize=(8, 6))
plt.pie(
    pie_sizes,
    explode=(0.1, 0),  # pull only the "positive" wedge out of the pie
    labels=pie_labels,
    colors=['#ff9999', '#66b3ff'],
    autopct='%1.1f%%',
    startangle=140,
)
plt.axis('equal')  # equal aspect ratio keeps the pie circular
plt.title('情感占比')
plt.savefig('png/情感占比饼图.png')
plt.show()

print("饼图成功生成!")

# Split the raw review text ('评价') into two lists by sentiment label
positive_comments = data[data['情感标签'] == 1]['评价'].tolist()
negative_comments = data[data['情感标签'] == 0]['评价'].tolist()


# 数据分词
def jieba_cut(all_list, name_path, stop_words=None):
    """Build a word cloud image from a list of review strings.

    Each review is POS-tagged with jieba; only adjectives (tag ``'a'``) that
    are not in the stop-word list are kept, then rendered as a word cloud
    masked by the Henan-province outline image.

    Args:
        all_list: iterable of review strings to tokenize.
        name_path: output path for the generated word-cloud PNG.
        stop_words: optional collection of words to exclude; when None,
            falls back to the module-level ``del_list`` (kept for backward
            compatibility with the existing call sites that set the global
            before each call).

    Side effects: writes the word-cloud image to ``name_path``.
    """
    if stop_words is None:
        stop_words = del_list  # legacy global protocol: caller sets it first
    keep_pos = {'a'}  # POS tags to keep: adjectives only

    # Single POS-tagging pass per review. (The original code also ran a
    # plain jieba.lcut pass into a list that was never used — removed.)
    kept_words = []
    for comment in all_list:
        for word, pos in psg.lcut(comment):
            if word not in stop_words and pos in keep_pos:
                kept_words.append(word)

    # WordCloud.generate expects whitespace-separated text; join is O(n)
    # versus the quadratic repeated string concatenation.
    text = ' '.join(kept_words)

    # Font path, white background, max font size 200, province-outline mask.
    wordcloud = WordCloud(font_path='../HeiTi.ttf', background_color='white',
                          max_font_size=200,
                          mask=imageio.v3.imread('河南省轮廓图.png')).generate(text)
    wordcloud.to_file(name_path)  # write the image


# Generate one word cloud per sentiment. jieba_cut reads the module-level
# `del_list` stop-word list, so it is (re)bound before each call — here via
# the loop target, which assigns the global at module level.
for del_list, comments, out_path in [
    (['累', '陡', '遗憾', '小', '少', '贵'], positive_comments, 'png/积极评论词云.png'),
    (['好', '方便', '便宜', '最好', '红', '不错'], negative_comments, 'png/消极评论词云.png'),
]:
    jieba_cut(comments, out_path)

print("词云生成完成!")
