import json
import re
from collections import Counter

import imageio
import jieba
import matplotlib.pyplot as plt
import pandas as pd
from wordcloud import WordCloud
# Read the positive-sample comments, one comment per line.
with open("../data/positive_samples.txt", "r", encoding="utf-8") as f:
    comments = f.readlines()

# Merge all comments into one string, then keep only CJK (Chinese)
# characters — this drops punctuation, digits and Latin text in one pass.
text = " ".join(comments)
cleaned_data = ''.join(re.findall(r'[\u4e00-\u9fa5]', text))

# Segment the Chinese text with jieba in precise (non-full) mode.
data_cut = jieba.lcut(cleaned_data, cut_all=False)

# Load the stop-word list. 'ooo' is a sentinel separator that never occurs,
# so each full line lands in a single column. File is GBK-encoded.
stopword = pd.read_csv('../data/stopword.txt', sep='ooo', encoding='gbk',
                       header=None, engine='python')
stopword = [' '] + list(stopword[0])

# Membership testing against a set is O(1) vs O(n) for the list; build it
# once outside the loop. Keep only multi-character tokens that are not
# stop words (single characters are rarely meaningful terms).
stopword_set = set(stopword)
filter_word = [word for word in data_cut
               if word not in stopword_set and len(word) > 1]

# Term-frequency table; Counter replaces the manual dict-counting loop.
tf_dict = Counter(filter_word)
def term_freq(word_list, num=10):
    """Return the frequency of each term in *word_list* occurring more than *num* times.

    Parameters
    ----------
    word_list : list of str
        Tokens to count.
    num : int, optional
        Minimum count threshold (exclusive); defaults to 10.

    Returns
    -------
    pandas.Series
        Term -> count, sorted by count descending, filtered to counts > num.
    """
    counts = pd.Series(word_list).value_counts()
    frequent = counts[counts > num]
    return frequent
# Terms occurring more than 10 times (default threshold of term_freq).
data = term_freq(filter_word)

# Mask image that shapes the word cloud.
# NOTE(review): imageio.imread is deprecated in imageio v3 — switch to
# imageio.v2.imread (or imageio.v3.imread) when upgrading.
back_pic = imageio.imread('../data/background.jpg')

wc = WordCloud(
    # Windows-specific Chinese font; required so CJK glyphs render.
    # Adjust this path on non-Windows platforms.
    font_path='C:/Windows/Fonts/simyou.ttf',
    width=800,
    height=400,
    mask=back_pic,
    background_color='white',
)
# fit_words expects a mapping of word -> frequency; convert the pandas
# Series to a plain dict rather than relying on duck typing.
wc.fit_words(dict(data))

plt.figure(figsize=(16, 8))
plt.imshow(wc)
plt.axis('off')
plt.show()
