from wordcloud import WordCloud
import matplotlib.pyplot as plt
import jieba


def read_deal_text():
    """Read the raw text from ciyun.txt, strip noise tokens, segment it
    with jieba (precise mode), and write the space-separated words to
    txt_save.txt.
    """
    # utf-8 must be explicit: the platform default encoding (e.g. cp936/gbk
    # on Chinese Windows, or ascii in some locales) can fail or silently
    # mis-decode the Chinese source text.
    with open("ciyun.txt", "r", encoding="utf-8") as f:
        txt = f.read()

    # Punctuation and filler words that carry no topical meaning for the
    # word cloud; each occurrence is replaced with a space.
    re_move = ["，", "。", '\n', '也是', '都是', '第', '章', '是在', '对着', '许多', '即便是', '望着', '若是', '在', '了', '上', '的', '\xa0']
    for noise in re_move:
        txt = txt.replace(noise, " ")

    word = jieba.lcut(txt)  # precise-mode segmentation into a word list

    # Single batched write (each word followed by a space) instead of one
    # tiny write() call per token.
    with open("txt_save.txt", 'w', encoding="utf-8") as file:
        file.writelines(w + ' ' for w in word)
    print("文本处理完成")

def img_grearte():
    """Build a word cloud from txt_save.txt, save it as doupocangqiong.png,
    and display the image with matplotlib.
    """
    # Read back the pre-segmented, space-separated words.
    with open("txt_save.txt", "r", encoding="utf-8") as file:
        txt = file.read()
    cloud = WordCloud(
        # Bug fix: the Windows font directory is "Fonts", not "Font".
        # With a non-existent font_path, Chinese glyphs cannot be rendered
        # (they appear as empty boxes or WordCloud errors out).
        font_path='C:\\Windows\\Fonts\\simkai.ttf',
        background_color="white",
        width=800,
        height=800,
    ).generate(txt)
    cloud.to_file('doupocangqiong.png')
    print("词云图片已保存")

    # Bug fix: without imshow() the figure shown by plt.show() is blank —
    # render the generated word-cloud image first.
    plt.imshow(cloud, interpolation="bilinear")
    plt.axis("off")  # hide axis ticks/frame around the image
    plt.show()

# Run the pipeline only when executed as a script, so importing this module
# does not trigger file I/O and plotting as a side effect.
if __name__ == "__main__":
    read_deal_text()
    img_grearte()
