# @Time : 2024/12/20 16:33
# @Author : ZHUYI
# @File : WordCloud.py
import pandas as pd  # 表
import matplotlib.pyplot as plt  # 画图
import jieba  # 结巴分词
import wordcloud  # 词云


def word_cloud_view(csv_path='csv/juejin_comment.csv',
                    font_path=r'C:\Windows\Fonts\simfang.ttf'):
    """Render a word cloud from a UTF-8 comment file.

    Reads the whole file as one text blob, segments it with jieba, drops a
    small set of punctuation/stop words, counts word frequencies, and shows
    the resulting word cloud with matplotlib.

    Args:
        csv_path: Path of the UTF-8 text/CSV file to read. Defaults to the
            original hard-coded location.
        font_path: TrueType font used by WordCloud (a CJK-capable font is
            required to render Chinese). Defaults to the original SimFang path.
    """
    with open(csv_path, 'r', encoding='utf-8') as fp:
        text = fp.read().strip().replace('\n', '')

    # Set membership is O(1); the original used a list.
    stop_words = {'，', '。', '的', '*', '～', '!', '了'}
    words = [w for w in jieba.lcut(text) if w not in stop_words]

    # Count the filtered tokens directly. The original concatenated the
    # filtered words with no separator and ran jieba.lcut a second time,
    # which can re-segment across word boundaries and skew the counts —
    # as well as doing the segmentation work twice.
    freqs = pd.Series(words).value_counts()

    wc = wordcloud.WordCloud(font_path=font_path, width=1200, height=600,
                             background_color='white')
    wc.generate_from_frequencies(freqs)
    plt.imshow(wc)
    plt.axis("off")
    plt.show()
