import jieba
import wordcloud
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt


# Read a text file
def read_text(file_path):
    """Read an entire UTF-8 text file and return its contents.

    Args:
        file_path: Path of the file to read.

    Returns:
        The full file contents as a single string.
    """
    with open(file_path, mode='r', encoding='utf-8') as handle:
        return handle.read()

# Segment text with jieba
def segment_text(text, stopwords=None, min_length=2):
    """Segment Chinese text with jieba and join the kept tokens with spaces.

    Generalizes the original, which carried a hard-coded (and always empty)
    stopword set; callers can now pass their own. Calling with just ``text``
    behaves exactly as before.

    Args:
        text: Raw text to segment.
        stopwords: Optional collection of words to drop. Defaults to no
            filtering (matching the original empty set).
        min_length: Minimum token length to keep. The default of 2 filters
            single characters, which rarely carry meaning in a word cloud
            (equivalent to the original ``len(word) > 1``).

    Returns:
        A single space-separated string of the kept tokens, ready for
        ``wordcloud.WordCloud.generate()``.
    """
    # Custom dictionaries can be registered by the caller with
    # jieba.load_userdict("userdict.txt") before calling this function.
    stop = set(stopwords) if stopwords else set()

    # jieba.cut returns a generator of tokens; filter lazily and join once.
    kept = (word for word in jieba.cut(text)
            if len(word) >= min_length and word not in stop)
    return " ".join(kept)

# Create a heart-shaped word cloud
def _heart_mask(size):
    """Build a (size, size) uint8 mask: 0 inside a heart, 255 outside.

    wordcloud treats 255 ("white") pixels as blocked, so words are drawn
    only inside the heart region. The shape comes from the classic
    implicit heart curve (u^2 + v^2 - 1)^3 - u^2 * v^3 <= 0.
    """
    rows, cols = np.ogrid[:size, :size]
    # Map pixel coordinates onto the curve's frame: u is horizontal,
    # v is vertical and flipped (image rows grow downward), nudged up
    # slightly so the heart sits centred in the square.
    scale = size / 2.6
    u = (cols - size / 2) / scale
    v = (size / 2 - rows) / scale + 0.25
    inside = (u ** 2 + v ** 2 - 1) ** 3 - (u ** 2) * (v ** 3) <= 0
    return np.where(inside, 0, 255).astype(np.uint8)


def create_heart_wordcloud(text, output_path="wordcloud.png"):
    """Render *text* as a heart-shaped word cloud, save it, and display it.

    Bug fix: the previous mask ``(x-150)**2 + (y-150)**2 > 130**2`` is a
    circle, not a heart, despite the function's name, comments, and red
    contour. The mask now uses a genuine heart curve (see _heart_mask)
    and is emitted as uint8, the convention wordcloud expects.

    Args:
        text: Space-separated words (e.g. the output of segment_text()).
        output_path: Path where the PNG is written (default "wordcloud.png").
    """
    # Same 300x300 canvas as the original implementation.
    mask = _heart_mask(300)

    wc = wordcloud.WordCloud(
        font_path="simhei.ttf",  # a font with CJK glyphs is required for Chinese
        background_color="white",
        max_words=200,
        mask=mask,
        contour_width=3,
        contour_color='red'
    )

    # Lay out the words inside the heart.
    wc.generate(text)

    # Persist the image before showing it, so the file exists even if the
    # interactive display is closed or unavailable.
    wc.to_file(output_path)

    # Show the result on screen.
    plt.figure(figsize=(10, 8))
    plt.imshow(wc, interpolation='bilinear')
    plt.axis("off")
    plt.show()

# Main entry point
def main():
    """Entry point: read a text file, segment it, and render a heart cloud."""
    # Replace "my_text.txt" with the path to your own text file.
    raw_text = read_text("my_text.txt")

    # Tokenize with jieba and join tokens for wordcloud.
    tokens = segment_text(raw_text)

    # Build, save, and display the heart-shaped word cloud.
    create_heart_wordcloud(tokens, "heart_wordcloud.png")


if __name__ == "__main__":
    main()