import json
import jieba
from snownlp import SnowNLP
from collections import Counter
from gensim import corpora, models
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import warnings

# Silence noisy library warnings (jieba/gensim emit deprecation chatter).
warnings.filterwarnings('ignore')

# Configure matplotlib for Chinese text: SimHei (Windows) with Arial Unicode MS
# as a fallback, and keep the ASCII hyphen for minus signs so negative axis
# labels render correctly under a CJK font.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS']
plt.rcParams['axes.unicode_minus'] = False

# Stopwords filtered out of every tokenized title (frozen: never mutated).
_STOPWORDS = frozenset({
    '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一',
    '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有',
    '看', '好', '自己', '这', '那', '里', '下', '把', '过', '什么', '么',
    '为', '又', '可', '家', '学', '只', '以', '日', '还', '月', '分', '钟',
    '得', '如', '年', '同', '现在', '中', '最', '能', '之', '问题', '然后', '为何', '如何',
    '因为', '所以', '但是', '如果', '已经', '可以', '这样', '这些', '那些', '发生', '情况',
    '怎么', '哪里', '时候', '东西', '方面', '地方', '工作', '自己的', '回应', '视频'
})


def _load_titles(path):
    """Load the JSON file at *path* and return the list of usable titles.

    Records missing a 'title' key, with an empty title, or with the
    placeholder '无标题' are skipped (previously a missing key raised
    KeyError and aborted the whole analysis).
    """
    with open(path, 'r', encoding='utf-8') as f:
        data_list = json.load(f)
    return [
        item.get('title') for item in data_list
        if item.get('title') and item.get('title') != '无标题'
    ]


def _tokenize(titles):
    """Segment each title with jieba and drop stopwords/digits/1-char tokens.

    Returns (per-title token lists, Counter of global token frequencies).
    """
    words_list = []
    word_freq = Counter()
    for title in titles:
        filtered = [
            word for word in jieba.lcut(title)
            if word not in _STOPWORDS
               and len(word) > 1
               and word.strip()
               and not word.isdigit()
        ]
        words_list.append(filtered)
        word_freq.update(filtered)
    return words_list, word_freq


def _sentiment_stats(titles):
    """Score each title with SnowNLP (0=negative .. 1=positive).

    Titles SnowNLP cannot score fall back to 0.5 (neutral) so the average
    stays well-defined. Returns (scores, average, positive, neutral,
    negative) using thresholds >0.6 positive and <0.4 negative.
    """
    scores = []
    for title in titles:
        try:
            scores.append(SnowNLP(title).sentiments)
        except Exception:
            scores.append(0.5)  # best-effort: keep the pipeline running
    avg = sum(scores) / len(scores)
    positive = sum(1 for s in scores if s > 0.6)
    negative = sum(1 for s in scores if s < 0.4)
    neutral = len(scores) - positive - negative
    return scores, avg, positive, neutral, negative


def _build_topics(words_list, num_topics):
    """Fit an LDA model over the non-empty token lists.

    Returns gensim's `print_topics` output (list of (id, description)
    tuples), or [] when there are no documents or modeling fails.
    """
    documents = [words for words in words_list if words]
    if not documents:
        return []
    try:
        dictionary = corpora.Dictionary(documents)
        # Drop tokens seen in <2 docs or >80% of docs — they carry no topic signal.
        dictionary.filter_extremes(no_below=2, no_above=0.8)
        corpus = [dictionary.doc2bow(words) for words in documents]
        lda = models.LdaModel(
            corpus,
            num_topics=num_topics,
            id2word=dictionary,
            random_state=42,  # deterministic topics across runs
            passes=10,
            alpha='auto'
        )
        return lda.print_topics(num_words=5)
    except Exception as e:
        print(f"主题建模失败: {e}")
        return []


def _plot_wordcloud(word_freq):
    """Render the keyword cloud to 词云图.png (best-effort: failures are logged)."""
    try:
        wc = WordCloud(
            # NOTE(review): hard-coded Windows font path — fails silently on
            # other platforms (caught below); consider making it configurable.
            font_path='C:\\Windows\\Fonts\\SimHei.ttf',
            width=1200,
            height=800,
            background_color='white',
            max_words=200,
            max_font_size=150,
            random_state=42
        )
        wc.generate_from_frequencies(dict(word_freq))
        plt.figure(figsize=(15, 10))
        plt.imshow(wc, interpolation='bilinear')
        plt.axis('off')
        plt.title('新闻关键词词云图')
        plt.savefig('词云图.png', dpi=300, bbox_inches='tight')
        plt.close()  # release the figure — previously leaked on every call
    except Exception as e:
        print(f"词云图生成失败: {e}")


def _plot_sentiment(sentiment_list, sentiment_avg, positive, neutral, negative):
    """Save 情感分析图表.png (histogram + pie) and show it (best-effort)."""
    try:
        plt.figure(figsize=(12, 4))

        # Histogram of raw sentiment scores with the mean marked.
        plt.subplot(1, 2, 1)
        plt.hist(sentiment_list, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
        plt.axvline(sentiment_avg, color='red', linestyle='--', label=f'平均值: {sentiment_avg:.3f}')
        plt.xlabel('情感倾向值')
        plt.ylabel('新闻数量')
        plt.title('情感倾向分布')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Pie chart of the three sentiment buckets.
        plt.subplot(1, 2, 2)
        labels = ['积极', '中性', '消极']
        sizes = [positive, neutral, negative]
        colors = ['#ff9999', '#66b3ff', '#99ff99']
        plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
        plt.title('情感分类分布')

        plt.tight_layout()
        plt.savefig('情感分析图表.png', dpi=300, bbox_inches='tight')
        plt.show()
    except Exception as e:
        print(f"图表生成失败: {e}")


def analyze_news(news_data, num_topics=5):
    """Analyze news headlines: tokenization, sentiment, LDA topics, charts.

    Args:
        news_data: path to a JSON file holding a list of dicts; each record's
            'title' value is analyzed (missing/empty/'无标题' titles skipped).
        num_topics: number of LDA topics to extract (default 5 preserves the
            previous hard-coded behavior).

    Returns:
        A summary dict (counts, average sentiment, sentiment distribution,
        top-20 words, topic descriptions), or None when no usable title exists.

    Side effects:
        Prints a report to stdout and writes 词云图.png / 情感分析图表.png
        to the current directory.
    """
    print("开始新闻文本分析...")

    title_list = _load_titles(news_data)
    if not title_list:
        print("没有有效标题进行分析")
        return None

    words_list, word_freq = _tokenize(title_list)
    sentiment_list, sentiment_avg, positive, neutral, negative = _sentiment_stats(title_list)
    topics = _build_topics(words_list, num_topics)

    print("\n分析结果:")
    print(f"总标题数: {len(title_list)}")
    print(f"平均情感倾向: {sentiment_avg:.3f}")
    print(f"情感分布: 积极 {positive}，中性 {neutral}，消极 {negative}")

    if topics:
        print("\n主题分析:")
        for i, topic in enumerate(topics):
            print(f"主题 {i + 1}: {topic[1]}")

    _plot_wordcloud(word_freq)
    _plot_sentiment(sentiment_list, sentiment_avg, positive, neutral, negative)

    return {
        'total_titles': len(title_list),
        'total_words': len(word_freq),
        'avg_sentiment': sentiment_avg,
        'sentiment_distribution': {
            'positive': positive,
            'neutral': neutral,
            'negative': negative
        },
        'top_words': dict(word_freq.most_common(20)),
        'topics': [topic[1] for topic in topics] if topics else []
    }

if __name__ == '__main__':
    # Script entry point: run the full pipeline against the default data file.
    result = analyze_news('news_data.json')