from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import time
import random
import json
import jieba
from snownlp import SnowNLP
from gensim import corpora, models
import matplotlib.pyplot as plt
from collections import Counter
import warnings
from wordcloud import WordCloud
from openai import OpenAI
import os

# Silence library warnings (gensim/matplotlib/snownlp emit noisy deprecation notices).
warnings.filterwarnings('ignore')

# Configure matplotlib fonts so Chinese glyphs render instead of boxes;
# 'SimHei' covers Windows, 'Arial Unicode MS' covers macOS.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS']
# Keep the minus sign rendering correctly when a CJK font is active.
plt.rcParams['axes.unicode_minus'] = False


def _simulate_scrolling(driver, scroll_times):
    """Scroll the page repeatedly so lazily-loaded feed items render.

    Each pass jumps to the bottom, waits a random human-like delay, then
    re-walks the page in thirds so intermediate lazy loaders fire too.
    Finally jumps back to the top of the page.
    """
    print("\n" + "=" * 50)
    print("🖱️ 开始模拟页面滚动")
    print(f"📊 计划滚动次数: {scroll_times} 次")

    for i in range(scroll_times):
        print(f"   ⏳ 第 {i + 1}/{scroll_times} 次滚动...")
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # Randomized pause to look less bot-like and give content time to load.
        time.sleep(random.uniform(2, 4))

        current_height = driver.execute_script("return document.body.scrollHeight")
        scroll_step = current_height // 3

        for step in range(3):
            scroll_position = scroll_step * (step + 1)
            driver.execute_script(f"window.scrollTo(0, {scroll_position});")
            time.sleep(random.uniform(0.5, 1.5))

    driver.execute_script("window.scrollTo(0, 0);")


def _parse_news_item(item, index):
    """Extract one news record from a `.channel-feed-item` soup node.

    Returns a dict with keys index/title/link/source/time/comments/collections.
    Missing fields fall back to the same placeholder strings the page-specific
    selectors produced before ("无标题", "无链接", "未知来源", "无时间", "0").
    """
    # The page uses different title classes for questions vs. articles;
    # try the known variants before falling back to the first anchor.
    title_element = (item.select_one('.question-title-text') or
                     item.select_one('.article-title-text') or
                     item.select_one('.question-title') or
                     item.select_one('a'))
    title = title_element.get_text(strip=True) if title_element else "无标题"

    link_element = item.select_one('a[href]')
    link = link_element.get('href') if link_element else "无链接"
    # Relative links need the site prefix to be usable outside the page.
    if link and link.startswith('/'):
        link = 'https://news.qq.com' + link

    source_element = item.select_one('.media-name')
    source = source_element.get_text(strip=True) if source_element else "未知来源"

    time_element = item.select_one('.time')
    time_text = time_element.get_text(strip=True) if time_element else "无时间"

    collect_element = (item.select_one('.article-collect .interation-num') or
                       item.select_one('[class*="collect"]'))
    collect_num = collect_element.get_text(strip=True) if collect_element else "0"

    comment_element = item.select_one('.article-comment .interation-num')
    comment_num = comment_element.get_text(strip=True) if comment_element else "0"

    # Counter values stay as strings, matching what the page displays.
    return {
        'index': index,
        'title': title,
        'link': link,
        'source': source,
        'time': time_text,
        'comments': comment_num,
        'collections': collect_num
    }


def getDataWithSelenium(url, scroll_times=3):
    """Scrape news items from *url* using headless Chrome.

    Scrolls ``scroll_times`` times to trigger lazy loading, then parses every
    `.channel-feed-item` node into a dict (see ``_parse_news_item``).
    Returns a list of dicts, or [] on any scraping error. The browser is
    always closed, even on failure.
    """
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-dev-shm-usage")
    # Plain desktop UA so the site serves the normal desktop page.
    chrome_options.add_argument("--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36")

    driver = webdriver.Chrome(options=chrome_options)

    try:
        driver.get(url)
        _simulate_scrolling(driver, scroll_times)

        print("\n" + "=" * 50)
        print("✅ 滑动完成，开始提取页面数据...")

        soup = BeautifulSoup(driver.page_source, 'html.parser')
        items = soup.select('.channel-feed-item')
        print(f'✨ 已找到 {len(items)} 条新闻条目')

        print("🔄 正在解析新闻数据...")
        all_news = [_parse_news_item(item, i) for i, item in enumerate(items, 1)]

        print(f"✅ 成功解析 {len(all_news)} 条新闻")
        return all_news

    except Exception as e:
        print("\n" + "=" * 50)
        print("❌ 数据爬取过程出现错误")
        print("=" * 50)
        print(f"错误信息：{e}")
        print("建议：请检查网络连接和目标网页状态，稍后重试")
        return []
    finally:
        print("\n🔚 关闭浏览器...")
        driver.quit()
        print("\n" + "=" * 50)
        print("🏁 网页数据爬取结束")
        print("=" * 50)


# Common Chinese stopwords plus feed-specific noise words; filtered out
# before word-frequency counting and topic modelling.
_STOPWORDS = {
    '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一',
    '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有',
    '看', '好', '自己', '这', '那', '里', '下', '把', '过', '什么', '么',
    '为', '又', '可', '家', '学', '只', '以', '日', '还', '月', '分', '钟',
    '得', '如', '年', '同', '现在', '中', '最', '能', '之', '问题', '然后', '为何', '如何',
    '因为', '所以', '但是', '如果', '已经', '可以', '这样', '这些', '那些', '发生', '情况',
    '怎么', '什么', '哪里', '时候', '东西', '方面', '地方', '工作', '自己的', '回应', '视频'
}


def _extract_titles(news_data):
    """Return the usable titles: non-empty and not the '无标题' placeholder."""
    titles = []
    for data in news_data:
        title = data.get('title', '')
        if title and title != '无标题':
            titles.append(title)
    return titles


def _tokenize_titles(title_list):
    """jieba-segment each title; return (per-title word lists, global Counter).

    Stopwords, single characters, whitespace tokens and pure digits are dropped.
    """
    words_list = []
    word_freq = Counter()
    for title in title_list:
        filtered_words = [
            word for word in jieba.lcut(title)
            if word not in _STOPWORDS     # drop stopwords
               and len(word) > 1          # drop single-character tokens
               and word.strip()           # drop whitespace tokens
               and not word.isdigit()     # drop pure numbers
        ]
        words_list.append(filtered_words)
        word_freq.update(filtered_words)
    return words_list, word_freq


def _score_sentiments(title_list):
    """Return (per-title SnowNLP sentiment scores in [0, 1], average score).

    Titles SnowNLP cannot score default to the neutral value 0.5.
    Caller guarantees title_list is non-empty.
    """
    scores = []
    for title in title_list:
        try:
            scores.append(SnowNLP(title).sentiments)
        except Exception:
            scores.append(0.5)
    return scores, sum(scores) / len(scores)


def _model_topics(words_list):
    """Fit a 5-topic LDA over the tokenized titles.

    Returns gensim's print_topics output (list of (id, description) tuples),
    or [] when there are no non-empty documents or modelling fails.
    """
    docs = [words for words in words_list if words]
    if not docs:
        return []
    try:
        dictionary = corpora.Dictionary(docs)
        # Drop words seen in fewer than 2 titles or in more than 80% of them.
        dictionary.filter_extremes(no_below=2, no_above=0.8)
        corpus = [dictionary.doc2bow(words) for words in docs]

        lda = models.LdaModel(
            corpus,
            num_topics=5,
            id2word=dictionary,
            random_state=42,   # fixed seed for reproducible topics
            passes=10,
            alpha='auto'
        )

        topics = lda.print_topics(num_words=5)
        print(f"✅ 主题建模完成，提取了 {len(topics)} 个主题")
        return topics
    except Exception as e:
        print(f"❌ 主题建模失败: {e}")
        return []


def _plot_wordcloud(word_freq):
    """Render the word-frequency Counter as a word cloud and save it as a PNG."""
    print("\n🌈 正在生成词云图...")
    try:
        wc = WordCloud(
            # Windows SimHei font path so Chinese glyphs render.
            # NOTE(review): hard-coded Windows path — fails (and is caught
            # below) on other platforms; consider a configurable font path.
            font_path='C:\\Windows\\Fonts\\SimHei.ttf',
            width=1200,
            height=800,
            background_color='white',
            max_words=200,
            max_font_size=150,
            random_state=42
        )
        wc.generate_from_frequencies(dict(word_freq))

        plt.figure(figsize=(15, 10))
        plt.imshow(wc, interpolation='bilinear')
        plt.axis('off')
        plt.title('新闻关键词词云图')
        plt.savefig('词云图.png', dpi=300, bbox_inches='tight')
        print("📊 词云图已生成并保存为 '词云图.png'")
    except Exception as e:
        print(f"❌ 词云图生成失败: {e}")


def _plot_sentiment_charts(sentiment_list, sentiment_avg, positive, neutral, negative):
    """Save (and show) a histogram + pie chart of the sentiment distribution."""
    try:
        plt.figure(figsize=(12, 4))

        # Histogram of raw sentiment scores, with the mean marked.
        plt.subplot(1, 2, 1)
        plt.hist(sentiment_list, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
        plt.axvline(sentiment_avg, color='red', linestyle='--', label=f'平均值: {sentiment_avg:.3f}')
        plt.xlabel('情感倾向值')
        plt.ylabel('新闻数量')
        plt.title('情感倾向分布')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Pie chart of the positive/neutral/negative class counts.
        plt.subplot(1, 2, 2)
        labels = ['积极', '中性', '消极']
        sizes = [positive, neutral, negative]
        colors = ['#ff9999', '#66b3ff', '#99ff99']
        plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
        plt.title('情感分类分布')

        plt.tight_layout()
        plt.savefig('情感分析图表.png', dpi=300, bbox_inches='tight')
        plt.show()

        print("📊 情感分析图表已生成并保存为 '情感分析图表.png'")
    except Exception as e:
        print(f"❌ 图表生成失败: {e}")


def analyze_news_text(news_data):
    """Run the full text-analysis pipeline over scraped news records.

    Steps: extract titles → jieba tokenization → SnowNLP sentiment scoring →
    gensim LDA topic modelling → console summary → word cloud PNG →
    sentiment charts PNG.

    Returns a summary dict (counts, average sentiment, sentiment distribution,
    top-20 words, topic descriptions), or None when no usable titles exist.
    """
    print("\n" + "=" * 50)
    print("📡 开始新闻文本分析")
    print("=" * 50)

    # 1. Collect usable titles.
    title_list = _extract_titles(news_data)
    print(f"📰 有效标题数量: {len(title_list)}")
    if not title_list:
        print("❌ 没有有效标题进行分析")
        return None

    # 2-3. Tokenize and count word frequencies.
    print("⚙️ 正在进行中文分词...")
    words_list, word_freq = _tokenize_titles(title_list)
    print(f"✅ 分词完成，共得到 {len(word_freq)} 个不同词汇")

    # 4. Sentiment analysis; >0.6 counts as positive, <0.4 as negative.
    print("😊 正在进行情感分析...")
    sentiment_list, sentiment_avg = _score_sentiments(title_list)
    positive = sum(1 for s in sentiment_list if s > 0.6)
    negative = sum(1 for s in sentiment_list if s < 0.4)
    neutral = len(sentiment_list) - positive - negative

    print(f"✅ 情感分析完成")
    print(
        f"   平均情感倾向: {sentiment_avg:.3f} {'(偏积极)' if sentiment_avg > 0.6 else '(偏消极)' if sentiment_avg < 0.4 else '(中性)'}")
    print(f"   积极: {positive} ({positive / len(sentiment_list) * 100:.1f}%)")
    print(f"   中性: {neutral} ({neutral / len(sentiment_list) * 100:.1f}%)")
    print(f"   消极: {negative} ({negative / len(sentiment_list) * 100:.1f}%)")

    # 5. Topic modelling.
    print("🎯 正在进行主题建模...")
    topics = _model_topics(words_list)

    # 6. Console summary.
    print("\n" + "=" * 50)
    print("📊 文本分析结果汇总")
    print("=" * 50)

    print(f"📈 基本统计:")
    print(f"   总标题数: {len(title_list)}")
    print(f"   总词汇数: {len(word_freq)}")
    print(f"   平均情感倾向: {sentiment_avg:.3f}")

    print(f"\n🔥 高频词汇 (Top 15):")
    for word, freq in word_freq.most_common(15):
        print(f"   {word}: {freq}")

    if topics:
        print(f"\n🎯 主题分析:")
        for i, topic in enumerate(topics):
            print(f"   主题 {i + 1}: {topic[1]}")

    # Visualizations (best-effort: failures are reported, not fatal).
    _plot_wordcloud(word_freq)
    _plot_sentiment_charts(sentiment_list, sentiment_avg, positive, neutral, negative)

    # 7. Machine-readable result for the caller.
    return {
        'total_titles': len(title_list),
        'total_words': len(word_freq),
        'avg_sentiment': sentiment_avg,
        'sentiment_distribution': {
            'positive': positive,
            'neutral': neutral,
            'negative': negative
        },
        'top_words': dict(word_freq.most_common(20)),
        'topics': [topic[1] for topic in topics] if topics else []
    }

def AI_DeepSeek(user_input):
    """Stream an AI summary of the analysis results via DashScope's
    OpenAI-compatible endpoint (deepseek-v3 model).

    Requires the DASHSCOPE_API_KEY environment variable. The reasoning
    stream (if the model emits one) and the answer stream are printed as
    they arrive; errors are reported but never raised to the caller.
    """
    print("\n" + "=" * 50)
    print("🤖 开始 AI 深度分析")
    print("=" * 50)

    print("📊 正在解析分析数据...")

    # DashScope exposes an OpenAI-compatible API; reuse the OpenAI client.
    client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )

    # Accumulators for the full reasoning trace and the full answer.
    reasoning_content = ""
    answer_content = ""
    is_answering = False  # becomes True once the first answer token arrives

    # System prompt defining the summarization persona (kept verbatim).
    system_message = {
        "role": "system",
        "content": "- Role: 数据分析总结专家\n"
                   "- Background: 我已完成新闻数据的分析工作，包括分词、情感分析、主题建模等步骤，并使用了 jieba、SnowNLP、collections、gensim、matplotlib、wordcloud 等工具。"
                   "现在需要对这些分析的结论数据进行总结，以及结合当今时代局势得出你的分析结论。\n"
                   "提供的新闻数据分析结论数据，提取关键信息，以简洁的语言进行总结，确保总结内容贴合数据\n"
                   "总结必须使用中文，简洁明了，准确反映数据分析的核心结论。\n"
                   "只有发挥，你根据以上的数据，说明了现在局势反映了什么问题"
                   "- Output Format: 表述总结。"
    }

    try:
        completion = client.chat.completions.create(
            model="deepseek-v3",
            messages=[
                system_message,
                {"role": "user", "content": user_input}
            ],
            stream=True
        )

        for chunk in completion:
            # A chunk without choices carries the final usage statistics.
            if not chunk.choices:
                print("\nUsage:")
                print(chunk.usage)
                continue

            delta = chunk.choices[0].delta

            # Reasoning stream (only present on reasoning-capable models).
            if getattr(delta, 'reasoning_content', None) is not None:
                print(delta.reasoning_content, end='', flush=True)
                reasoning_content += delta.reasoning_content
                continue

            # BUG FIX: delta.content can be None on trailing chunks; the
            # original printed "None" and crashed on `answer_content += None`.
            content = delta.content
            if not content:
                continue

            if not is_answering:
                print("\n" + "=" * 50)
                print("📝 AI 分析结论")
                print("=" * 50)
                is_answering = True

            print(content, end='', flush=True)
            answer_content += content

    except Exception as e:
        print("\n" + "=" * 50)
        print("❌ AI 分析过程中出现错误")
        print("=" * 50)
        print(f"错误信息：{e}")
        print("建议：请检查网络连接和 API 密钥配置，稍后重试。")

    print("\n" + "=" * 50)
    print("✅ AI 深度分析完成")
    print("=" * 50)

def main():
    """Entry point: scrape the news feed, analyze it, and run the AI summary.

    Returns (news_data, analysis_result). On scrape failure returns
    ([], None) — the original returned a bare None here, which crashed the
    `news_data, analysis_result = main()` unpack at module level.
    """
    print("🚀 启动腾讯网新闻爬取与分析系统")
    print("=" * 50)

    # 1. Scrape the news feed.
    url = "https://news.qq.com/"
    print(f"📡 开始爬取新闻: {url}")
    news_data = getDataWithSelenium(url, scroll_times=5)

    if not news_data:
        print("❌ 没有获取到新闻数据")
        # Always return a 2-tuple so callers can unpack unconditionally.
        return [], None
    print(f"📡 爬取新闻完成")

    print(f"✅ 成功爬取 {len(news_data)} 条新闻")

    # 2. Persist the raw records with a timestamped filename.
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    json_filename = f'news_data_{timestamp}.json'

    with open(json_filename, 'w', encoding='utf-8') as f:
        json.dump(news_data, f, ensure_ascii=False, indent=2)

    print(f"💾 原始数据已保存到: {json_filename}")

    # 3. Text analysis (may return None when no usable titles exist).
    analysis_result = analyze_news_text(news_data)

    # 4-5. Persist the analysis and run the AI summary — but only when
    # analysis produced something; the original sent the literal string
    # "null" to the AI when analysis_result was None.
    if analysis_result:
        analysis_filename = f'analysis_result_{timestamp}.json'
        with open(analysis_filename, 'w', encoding='utf-8') as f:
            json.dump(analysis_result, f, ensure_ascii=False, indent=2)

        print(f"💾 分析结果已保存到: {analysis_filename}")

        user_input = json.dumps(analysis_result, ensure_ascii=False, indent=2)
        AI_DeepSeek(user_input)

    print("\n🎉 新闻爬取与分析完成！")

    return news_data, analysis_result


if __name__ == '__main__':
    # Third-party packages this script needs. (The original list also told
    # the user to `pip install warnings/time/random/json/os/collections`,
    # which are standard-library modules and not installable packages.)
    required_libs = [
        'selenium', 'beautifulsoup4', 'jieba', 'snownlp', 'gensim',
        'matplotlib', 'wordcloud', 'openai'
    ]
    print("📦 请确保已安装以下库:")
    for lib in required_libs:
        print(f"   pip install {lib}")
    print()

    # Run the pipeline. Guard the unpack: main() may return None on the
    # no-data path, and unpacking None raises TypeError.
    result = main()
    if result is not None:
        news_data, analysis_result = result