import jieba
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# 1. Load the source text (Tang poems, UTF-8).
with open("D:\\嘟嘟\\历史数据\\python学习\\实践项目1 jieba分词\\绘制词云图用文档\\唐诗\\唐诗三百首.txt", "r", encoding="utf-8") as f:
    text = f.read()

# 2. Build the stop-word set (one token per whitespace-separated entry).
with open("D:\\嘟嘟\\历史数据\\python学习\\stopwords.txt", "r", encoding="utf-8") as f:
    stopwords = set(f.read().split())

# 3. Tokenize, keeping only multi-character tokens that are not stop words.
words = []
for token in jieba.cut(text):
    if len(token) > 1 and token not in stopwords:
        words.append(token)
word_counts = Counter(words).most_common()  # (word, count) pairs, most frequent first

# 4. Word-cloud rendering helper.
def generate_wordcloud(words, font_path, mask=None, width=800, height=600,
                       max_words=50, background_color="blue"):
    """Build, display, and return a word cloud from word-frequency data.

    Args:
        words: Word frequencies — a dict mapping word -> count, or an
            iterable of (word, count) pairs such as Counter.most_common().
        font_path: Path to a font file covering the text's characters
            (required for CJK text; the default font has no CJK glyphs).
        mask: Optional image array (e.g. numpy array from a PNG) defining
            the cloud's shape; None gives a plain rectangle.
        width: Canvas width in pixels (WordCloud ignores it when a mask is given).
        height: Canvas height in pixels (WordCloud ignores it when a mask is given).
        max_words: Maximum number of words to draw.
        background_color: Canvas background color (was hard-coded to "blue";
            now a parameter with the same default, so existing calls are unchanged).

    Returns:
        The generated WordCloud object, so callers can e.g. save it with
        wc.to_file(...). (Previously returned None; callers that ignore
        the return value are unaffected.)
    """
    wc = WordCloud(
        font_path=font_path,
        mask=mask,
        width=width,
        height=height,
        max_words=max_words,
        background_color=background_color,
    )
    # dict() accepts both a mapping and an iterable of (word, count) pairs.
    wc.generate_from_frequencies(dict(words))
    # figsize is in inches at the default 100 dpi, matching the pixel canvas.
    plt.figure(figsize=(width / 100, height / 100))
    # Bilinear interpolation avoids blocky pixels when the figure size
    # does not exactly match the rendered canvas size.
    plt.imshow(wc, interpolation="bilinear")
    plt.axis("off")
    plt.show()
    return wc

# 5. Example invocations (adjust parameters as needed).
_FONT_BOLD = "D:\\嘟嘟\\历史数据\\python学习\\msyhbd.ttc"
_FONT_REGULAR = "D:\\嘟嘟\\历史数据\\python学习\\msyh.ttc"

# Basic rectangular cloud (bold font, 50 words).
generate_wordcloud(word_counts, font_path=_FONT_BOLD, max_words=50)

# Custom-size cloud (1200x800, regular font, 30 words).
generate_wordcloud(word_counts, font_path=_FONT_REGULAR, width=1200, height=800, max_words=30)

# Custom-shape cloud (star-shaped PNG mask, regular font, 100 words).
from PIL import Image
import numpy as np
mask = np.array(Image.open("star_mask.png"))  # transparent star PNG
generate_wordcloud(word_counts, font_path=_FONT_REGULAR, mask=mask, max_words=100)