import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
import jieba.posseg as psg


import warnings
warnings.filterwarnings("ignore")

# Load the raw reviews and drop duplicate comments so repeated
# (content, content_type) pairs do not inflate the word frequencies.
reviews = pd.read_csv('./reviews.csv')

reviews = reviews.drop_duplicates(subset=['content', 'content_type'])
content = reviews["content"]
# Strip digits, Latin letters, and brand/product words (JD, Midea,
# "electric water heater", "water heater") that carry no analytical signal.
# FIX: the original pattern ended with a dangling '|', adding an empty
# alternative that matched the empty string at every position — a useless
# branch that only slows matching down; it has been removed.
strinfo = re.compile('[0-9a-zA-Z]|京东|美的|电热水器|热水器')
content = content.apply(lambda x: strinfo.sub('', x))
# Segmentation: tokenize each review with jieba's POS tagger, keeping
# (word, POS-flag) pairs so that nouns can be selected in a later step.
def worker(text):
    """Segment *text* into a list of (word, POS-flag) tuples."""
    return [(token.word, token.flag) for token in psg.cut(text)]

seg_word = content.apply(worker)
# 删除停用词
stop_path = open(r"./stoplist.txt", 'r',encoding='UTF-8')
stop = stop_path.readlines()
stop = [x.replace('\n', '') for x in stop]
# 遍历所有词，取出停用词并选出名词，统计词频
word_selected=[]
for word_set in seg_word:
    for w in word_set:
        if w[0] not in stop and 'n' in w[1]:
          word_selected.append(w[0])
word_count = pd.Series(word_selected).value_counts().astype("str")
data_wc = list(zip(word_count.index,word_count.values))

import pyecharts.options as opts
from pyecharts.charts import WordCloud
from pyecharts.globals import SymbolType


# Build the word cloud and save it as a standalone HTML page.
cloud = WordCloud()
cloud.add(
    series_name="热点分析",
    data_pair=data_wc,
    word_size_range=[6, 66],
    shape=SymbolType.DIAMOND,
)
cloud.set_global_opts(
    title_opts=opts.TitleOpts(
        title="热点分析",
        title_textstyle_opts=opts.TextStyleOpts(font_size=23),
    ),
    tooltip_opts=opts.TooltipOpts(is_show=True),
)
cloud.render("basic_wordcloud_0428.html")
# cloud.render_notebook()