import pandas as pd
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from datetime import datetime
import os
from collections import Counter

class WordCloudGenerator:
    """Generate word-cloud images and word-frequency CSVs from news CSV files.

    Text is taken from the ``title`` and ``summary`` columns of a CSV,
    segmented with jieba, filtered against a stopword list, and rendered
    with the ``wordcloud`` package.
    """

    def __init__(self, wordcloud_dir='data/wordcloud',
                 wordfreq_dir='data/word_freq', font_path='msyh.ttc'):
        """
        wordcloud_dir: output directory for word-cloud images
        wordfreq_dir: output directory for word-frequency CSV files
        font_path: TrueType font used to render CJK glyphs (must exist at
                   render time; not validated here)
        """
        self.wordcloud_dir = wordcloud_dir
        self.wordfreq_dir = wordfreq_dir
        self.font_path = font_path
        # Create output directories up front so later saves cannot fail on
        # a missing directory.
        os.makedirs(self.wordcloud_dir, exist_ok=True)
        os.makedirs(self.wordfreq_dir, exist_ok=True)

    def load_stopwords(self, filepath='stopwords.txt'):
        """Load the stopword list.

        filepath: path to a UTF-8 text file with one stopword per line.
        Returns a set of stopwords; returns an empty set when the file is
        missing or unreadable, which effectively disables filtering.
        """
        if not os.path.exists(filepath):
            print("未找到 stopwords.txt，跳过停用词过滤。")
            return set()
        try:
            with open(filepath, encoding='utf-8') as f:
                # Strip surrounding whitespace and drop blank lines.
                return set(line.strip() for line in f if line.strip())
        # Narrowed from a blanket `except Exception`: only I/O and decoding
        # failures are expected here; anything else should surface.
        except (OSError, UnicodeDecodeError) as e:
            print(f"加载停用词文件时出错: {e}")
            return set()

    def process_text(self, df, stopwords):
        """Segment and filter the text content of *df*.

        df: DataFrame whose ``title`` and/or ``summary`` columns (if
            present) are concatenated into one text blob.
        stopwords: set of words to exclude.
        Returns ``(space-joined filtered words, Counter of frequencies)``,
        or ``(None, None)`` when the frame yields no usable text.
        """
        parts = []
        for column in ('title', 'summary'):
            if column in df.columns:
                parts.append(' '.join(df[column].astype(str).tolist()))
        text = ' '.join(parts)

        if not text.strip():
            return None, None

        # Chinese word segmentation.
        words = jieba.cut(text)
        # Keep multi-character tokens that start with a CJK character and
        # are not stopwords. NOTE: only the first character is checked, so
        # mixed tokens such as "中A" pass the filter.
        filtered_words = [
            word for word in words
            if word not in stopwords
            and len(word) > 1
            and '\u4e00' <= word[0] <= '\u9fff'
        ]
        final_text = ' '.join(filtered_words)
        word_freq = Counter(filtered_words)

        return final_text, word_freq

    def save_word_freq(self, word_freq, output_filename):
        """Write *word_freq* (word -> count) to a CSV, sorted by count desc.

        word_freq: mapping of word to frequency.
        output_filename: file name (not path) inside ``self.wordfreq_dir``.
        """
        output_path = os.path.join(self.wordfreq_dir, output_filename)
        try:
            df = pd.DataFrame.from_dict(word_freq, orient='index',
                                        columns=['frequency'])
            df = df.sort_values(by='frequency', ascending=False)
            # utf-8-sig writes a BOM so Excel auto-detects the encoding.
            df.to_csv(output_path, encoding='utf-8-sig')
            print(f"词频文件已保存：{output_path}")
        except Exception as e:
            print(f"保存词频文件时出错: {e}")

    def generate_wordcloud_from_csv(self, csv_path, output_filename, stopwords):
        """Read *csv_path*, save a word-frequency CSV and a word-cloud image.

        csv_path: path to the source CSV (``title``/``summary`` columns).
        output_filename: image file name inside ``self.wordcloud_dir``.
        stopwords: set of words to exclude.
        """
        if not os.path.exists(csv_path):
            print(f"未找到文件: {csv_path}")
            return
        try:
            df = pd.read_csv(csv_path)
            final_text, word_freq = self.process_text(df, stopwords)

            if not final_text:
                print(f"{csv_path} 中没有有效文本，跳过处理。")
                return

            # Derive the frequency-file name from the image name regardless
            # of extension (the old `.replace('.png', ...)` silently did
            # nothing for non-.png names, overwriting nothing correctly).
            freq_filename = os.path.splitext(output_filename)[0] + '_freq.csv'
            self.save_word_freq(word_freq, freq_filename)

            wc = WordCloud(
                font_path=self.font_path,  # configurable; default 'msyh.ttc'
                width=800,
                height=600,
                background_color='white',
                max_words=200,
                stopwords=stopwords,
                colormap='hsv'
            )
            # Render from the already-computed frequencies so the image
            # matches the saved CSV exactly; `generate(text)` would
            # re-tokenize and (with collocations enabled) merge bigrams.
            wc.generate_from_frequencies(word_freq)
            output_path = os.path.join(self.wordcloud_dir, output_filename)
            wc.to_file(output_path)
            print(f"词云已保存：{output_path}")
        except Exception as e:
            print(f"生成词云时出错: {e}")

if __name__ == "__main__":
    # Build today's input paths and run both word-cloud jobs in sequence.
    today = datetime.now().date()
    generator = WordCloudGenerator()
    # Shared stopword set, loaded once for both runs.
    stopwords = generator.load_stopwords()

    jobs = (
        ("\n正在生成国内新闻词云...",
         f"data/csv/{today}national_news.csv",
         "national_wordcloud.png"),
        ("\n正在生成国际新闻词云...",
         f"data/csv/{today}international_news.csv",
         "international_wordcloud.png"),
    )
    for banner, csv_path, image_name in jobs:
        print(banner)
        generator.generate_wordcloud_from_csv(csv_path, image_name, stopwords)