# main.py
import sys
import os
from datetime import datetime, timedelta
import logging
from config import Config
from app import create_app, db
from models.models import RawNews, ProcessedNews
from ai_processing.summary_processor import BaiduNewsSummarizer, generate_board_summary
from ai_processing.ner_processor import extract_entities, map_entities_to_brands
from ai_processing.topic_classifier import classify_topic
from ai_processing.news_scorer import calculate_comprehensive_score

# Configure root logging: INFO level, "timestamp - logger name - level - message" format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger used throughout this script.
logger = logging.getLogger(__name__)

def calculate_initial_score(news):
    """
    Compute the initial relevance score for a processed news item.

    The score is the sum of two components:
      * entity weights -- one weight per brand in ``news.involved_brands``,
        looked up in ``Config.ENTITY_WEIGHTS`` (falling back to the
        ``'default'`` weight, then 1);
      * a recency score -- ``TIME_BASE_SCORE / (hours_old + 1)``, a simple
        hyperbolic decay so fresher articles score higher.

    Args:
        news (ProcessedNews): processed news object; only the
            ``involved_brands`` and ``publish_date`` attributes are read.

    Returns:
        float: the computed initial score (0 when the item has no brands
        and no publish date).
    """
    score = 0

    # Entity weight component: one configured weight per involved brand.
    if news.involved_brands:
        for brand in news.involved_brands:
            weight = Config.ENTITY_WEIGHTS.get(brand, Config.ENTITY_WEIGHTS.get('default', 1))
            score += weight

    # Recency component (simple implementation).
    if news.publish_date:
        # Clamp the age to >= 0: a publish_date in the future would otherwise
        # produce a negative age, inflating the score -- and at exactly
        # hours_old == -1 it would divide by zero.
        hours_old = max(
            0.0,
            (datetime.utcnow() - news.publish_date).total_seconds() / 3600,
        )
        # Decay base is configurable; defaults to 24 when Config omits it.
        time_base_score = getattr(Config, 'TIME_BASE_SCORE', 24)
        score += time_base_score / (hours_old + 1)

    return score

def populate_test_data():
    """
    Seed the database with a small, fixed set of test news articles.

    Skips seeding when any unprocessed RawNews rows already exist, so
    repeated runs do not duplicate data.

    Returns:
        list[RawNews] | None: the inserted RawNews objects, or None when
        seeding was skipped because unprocessed rows were found.
        NOTE(review): the two paths return different types; callers in
        this file ignore the return value, but confirm before relying on it.
    """
    app = create_app()

    with app.app_context():
        # Skip seeding if unprocessed rows are already present.
        unprocessed_news = RawNews.query.filter_by(processed=False).count()
        if unprocessed_news > 0:
            logger.info(f"数据库中已有 {unprocessed_news} 条未处理新闻，跳过测试数据填充")
            return

        # Fixed test dataset (Chinese automotive news items with staggered
        # publish dates so the recency score differs per item).
        test_news_data = [
            {
                "title": "小鹏汽车加速欧洲市场布局，开启海外本地化生产新篇章",
                "content": "小鹏汽车正式官宣欧洲本地化生产的重要规划，与位于奥地利格拉茨的麦格纳工厂展开深度合作，借助该工厂现有的成熟生产线，大力推动电动汽车在欧洲的本地化生产进程。",
                "source": "整车OEM",
                "url": "https://ne-time.cn/web/article/36688",
                "publish_date": datetime.utcnow()
            },
            {
                "title": "小米第四款新车曝光：全新增程SUV！2026年见",
                "content": "从小米SU7一炮打响，到YU7蓄势待发，小米造车这盘棋，走得越来越稳了。而就在大家还在热议SU7销量、YU7谍照的时候，小米的第四款新车——代号“昆仑”的全新增程SUV，已经悄悄浮出水面，预计2026年正式上市。这回，小米要玩把大的：方盒子造型、三排座椅、增程动力、激光雷达全上阵，直接剑指理想L9、问界M9这些“奶爸神车”，野心昭然若揭。1 方正造型+三排大空间，这回主打一个“实用至上",
                "source": "整车OEM",
                "url": "http://mp.weixin.qq.com/s?__biz=MzU4MDY1Mzk2Nw==&mid=2247698806&idx=1&sn=e3c4acbec1d9df7ebebf1094057efe61&chksm=fd5e0f96ca298680d77528966f9804f8b9b1e259d65a3b2868210c06f650d2947b3ae3ea8e4a#rd",
                "publish_date": datetime.utcnow() - timedelta(hours=2)
            },
            {
                "title": "或命名“ORA 5”！欧拉汽车发起首款纯电SUV命名投票！",
                "content": "[汽车之家 资讯] 日前，长城汽车欧拉品牌总经理发文称，欧拉从创立初期开始，就始终是一个与用户共创的品牌。所以现针对前不久工信部亮相的新车命名发起公开征集，车型投票名称包括欧拉猫(参数|询价)、ORA 5、ORA i5三种选项。截至发文，共有790人参与欧拉全新SUV车型命名投票，其中票数最高的是“ORA 5”，有399人选择；人气最低的是“欧拉猫”，票数为149。简单回顾新车，前不久吕文斌发布了其预告图，能看出整体风格延续了“猫”家族的设计理念，并且车身尺寸相比欧拉好猫更大，同时将会搭载激光雷达等硬件设备提升驾驶辅助能力。欧拉首款纯电SUV预告图",
                "source": "汽车之家",
                "url": "https://www.autohome.com.cn/news/202509/1309017.html",
                "publish_date": datetime.utcnow() - timedelta(days=3)
            },
            {
                "title": "特斯拉model Y 4D1电驱400V逆变器技术解读",
                "content": "特斯拉的“4D1”是目前广泛应用于 Model Y(图片|配置|询价) 后驱版（RWD）的主力电驱动平台，搭载了一套后置永磁同步电机（PMSM）+ SiC逆变器的组合，具有高效率、轻量化、低成本等特点。特斯拉 Model Y 所搭载的 4D1 逆变器代表了中端纯电驱动平台的高性价比方案：相比传统IGBT逆变器，4D1方案在效率、功率密度、系统集成度上均具有领先优势，是特斯拉持续降本增效的关键之一。",
                "source": "汽车新闻",
                "url": "https://www.dongchedi.com/article/7551676289887879720",
                "publish_date": datetime.utcnow() - timedelta(hours=2)
            },
            {
                "title": "鸿蒙智行首款旅行车享界S9T上市，搭载华为电驱",
                "content": "9月16日，鸿蒙智行旗下首款旅行车享界S9T(图片|配置|询价)正式上市。该车全系搭载华为DriveONE电驱动系统，提供纯电及增程两种动力版本，主打动力性能与能耗表现的平衡。享界S9T纯电版采用华为高压碳化硅动力平台，四驱版本前桥搭载158kW交流异步电机，后桥为227kW永磁同步电机。据官方数据，其Ultra版零百加速为3.95秒，CLTC续航里程达701公里。增程版则配备华为六合一电机，最高转速22000rpm，Ultra版CLTC综合续航为1305公里。该车搭载华为动态自适应扭矩控制系统，可实现毫秒级扭矩调节，旨在提升湿滑路面行驶稳定性和颠簸路况的滤震表现。全系支持6kW外放电功能，可输出220V交流电。华为DriveONE目前已与多个汽车品牌合作，为超过50款车型提供电驱解决方案。根据盖世汽车数据，2025年上半年其电驱动产品装机量位列行业第二，独立供应商中排名第一",
                "source": "菲菲定制",
                "url": "https://www.dongchedi.com/article/7551411135245795882",
                "publish_date": datetime.utcnow() - timedelta(days=3)
            }
        ]

        # Build RawNews ORM objects from the dataset, all flagged unprocessed.
        raw_news_list = []
        for news_data in test_news_data:
            raw_news = RawNews(
                title=news_data["title"],
                content=news_data["content"],
                source=news_data["source"],
                url=news_data["url"],
                publish_date=news_data["publish_date"],
                processed=False
            )
            raw_news_list.append(raw_news)

        # Insert and commit in a single batch.
        db.session.add_all(raw_news_list)
        db.session.commit()

        logger.info(f"成功填充 {len(raw_news_list)} 条测试数据")
        return raw_news_list

def process_single_news(raw_news):
    """
    Run the full AI pipeline on one raw news item.

    Pipeline steps: Baidu AI summary (optional, best effort), entity
    extraction + brand mapping, topic classification, board summary
    generation, then initial scoring.

    Args:
        raw_news (RawNews): raw news object. A missing/None ``title`` or
            ``content`` is normalized to a default before processing.

    Returns:
        ProcessedNews: the populated, scored (but not yet persisted) record.

    Raises:
        ValueError: if ``raw_news`` is None.
        Exception: re-raised from any downstream processing failure.
    """
    # Input validation.
    if raw_news is None:
        raise ValueError("原始新闻对象不能为空")

    # Normalize the title FIRST: the content warning below interpolates
    # raw_news.title, which would raise AttributeError if the attribute
    # were absent and we checked content before title.
    if getattr(raw_news, 'title', None) is None:
        logger.warning("新闻标题为空，使用默认标题")
        raw_news.title = "无标题"

    if getattr(raw_news, 'content', None) is None:
        logger.warning(f"新闻 '{raw_news.title}' 内容为空，使用空字符串替代")
        raw_news.content = ""

    try:
        # 1. Detailed summary via Baidu AI (only when enabled and keyed;
        #    failures degrade to a placeholder instead of aborting).
        summary = None
        if Config.BAIDU_SUMMARY_ENABLED and Config.BAIDU_AI_API_KEY:
            try:
                summarizer = BaiduNewsSummarizer()
                summary = summarizer.summarize(raw_news.content)
            except Exception as e:
                logger.warning(f"百度AI摘要生成失败: {e}")

        if not summary:
            summary = "摘要生成失败"

        logger.info(f"为新闻 '{raw_news.title}' 生成摘要完成")

        # 2. Entity recognition and brand mapping. Normalize None results
        #    once here instead of re-checking them again at record
        #    construction time.
        full_text = raw_news.title + " " + raw_news.content
        entities = extract_entities(full_text) or []
        brands, brand_type = map_entities_to_brands(entities)
        brands = brands or []
        brand_type = brand_type or 'unknown'

        # Debug info for the extraction results.
        logger.info(f"实体识别结果: {entities}")
        logger.info(f"品牌映射结果: {brands}, 类型: {brand_type}")

        # 3. Topic classification.
        topics = classify_topic(full_text)
        logger.info(f"主题分类结果: {topics}")
        logger.info(f"为新闻 '{raw_news.title}' 完成实体识别")

        # 4. Board (dashboard) summary.
        board_summary = generate_board_summary(raw_news.title, raw_news.content)
        logger.info(f"为新闻 '{raw_news.title}' 生成看板摘要完成")

        # 5. Build the processed record; inputs were already normalized above.
        processed_news = ProcessedNews(
            raw_news_id=raw_news.id,
            title=raw_news.title,
            content=raw_news.content,
            publish_date=raw_news.publish_date,
            involved_entities=entities,
            involved_brands=brands,
            brand_type=brand_type,
            topics=topics if topics else "无主题",
            event_description=summary,
            board_summary=board_summary if board_summary else (raw_news.title[:50] or "无标题"),
            category='其他'
        )

        # 6. Initial score (entity weights + recency).
        processed_news.news_score = calculate_initial_score(processed_news)

        logger.info(f"新闻 '{raw_news.title}' 处理完成")
        return processed_news

    except Exception as e:
        logger.error(f"处理新闻 '{raw_news.title}' 时发生错误: {e}")
        raise

def process_all_unprocessed_news():
    """
    Process every RawNews row not yet flagged as processed.

    Runs the single-item pipeline on each pending row inside an
    application context, stages successes in the session, and commits
    once at the end. A failure on one item is logged and counted but
    does not stop the batch.
    """
    app = create_app()

    with app.app_context():
        # Fetch the pending work set.
        pending = RawNews.query.filter_by(processed=False).all()

        if not pending:
            logger.info("没有未处理的新闻")
            return

        logger.info(f"发现 {len(pending)} 条未处理的新闻")

        successes, failures = 0, 0

        for item in pending:
            try:
                # Run the pipeline and stage the result.
                result = process_single_news(item)
                db.session.add(result)
                item.processed = True
                successes += 1
                logger.info(f"✓ 成功处理: {item.title}")
            except Exception as e:
                failures += 1
                logger.error(f"✗ 处理失败: {item.title} - {str(e)}")

        # Single commit for the whole batch.
        db.session.commit()

        logger.info(f"处理完成: 成功 {successes} 条, 失败 {failures} 条")

def display_results():
    """
    Print all processed news records plus summary statistics to stdout.

    Sections printed: per-item details, top-scoring items (deduplicated
    by title+rounded score), counts by category, by brand, and by topic.
    """
    app = create_app()

    with app.app_context():
        # Show details for every processed news record.
        processed_news_list = ProcessedNews.query.all()

        if not processed_news_list:
            logger.info("没有处理后的新闻")
            return

        print("\n" + "="*80)
        print("处理后的新闻详情")
        print("="*80)

        for i, news in enumerate(processed_news_list, 1):
            # Defensive attribute reads with fallbacks.
            title = getattr(news, 'title', '无标题')
            publish_date = getattr(news, 'publish_date', '未知时间')
            content = getattr(news, 'content', '')
            event_description = getattr(news, 'event_description', '无摘要')
            news_score = getattr(news, 'news_score', 0.0)
            involved_brands = getattr(news, 'involved_brands', [])
            brand_type = getattr(news, 'brand_type', '未知品牌类型')
            topics = getattr(news, 'topics', '未知主题')
            category = getattr(news, 'category', '未知分类')
            board_summary = getattr(news, 'board_summary', '无看板摘要')
            involved_entities = getattr(news, 'involved_entities', [])

            print(f"\n  {i}. 标题: {title}")
            print(f"     时间: {publish_date}")
            # NOTE(review): content[:] is a full copy, so the trailing "..."
            # follows the ENTIRE content; a truncation like content[:100]
            # looks intended -- confirm before changing output.
            print(f"     内容: {content[:] if content else ''}...")
            print(f"     百度全文摘要: {event_description}")
            print(f"     评分: {news_score:.2f}")
            print(f"     品牌: {involved_brands}")
            print(f"     品牌类型: {brand_type}")
            print(f"     主题: {topics}")
            print(f"     分类: {category}")
            print(f"     Deepseek看板摘要（待配置密钥）: {board_summary}")
            print(f"     实体: {involved_entities}")
            print("  " + "-"*50)

        # Summary statistics.
        print("\n" + "="*80)
        print("统计信息")
        print("="*80)

        # Top 10 by score, descending.
        top_news = ProcessedNews.query.order_by(ProcessedNews.news_score.desc()).limit(10).all()

        # Deduplicate: keep only one record per (title, score) pair.
        unique_news = []
        seen_combinations = set()

        for news in top_news:
            title = getattr(news, 'title', '无标题')
            news_score = getattr(news, 'news_score', 0.0)
            # Round the score to avoid float-precision mismatches in the key.
            combination_key = (title, round(news_score, 2))

            if combination_key not in seen_combinations:
                seen_combinations.add(combination_key)
                unique_news.append(news)

        # Show at most 6 deduplicated items.
        display_news = unique_news[:6]

        print(f"\n评分最高的 {len(display_news)} 条新闻:")
        for news in display_news:
            title = getattr(news, 'title', '无标题')
            news_score = getattr(news, 'news_score', 0.0)
            print(f"  - {title} (评分: {news_score:.2f})")


        # Counts per category (SQL GROUP BY).
        category_stats = db.session.query(
            ProcessedNews.category,
            db.func.count(ProcessedNews.id)
        ).group_by(ProcessedNews.category).all()

        print("\n按分类统计:")
        for category, count in category_stats:
            category = category if category is not None else '未知分类'
            print(f"  - {category}: {count} 条")

        # Counts per brand (aggregated in Python across all records).
        print("\n按品牌统计:")
        all_brands = []
        for news in ProcessedNews.query.all():
            involved_brands = getattr(news, 'involved_brands', [])
            if involved_brands:
                all_brands.extend(involved_brands)

        brand_count = {}
        for brand in all_brands:
            brand_count[brand] = brand_count.get(brand, 0) + 1

        for brand, count in sorted(brand_count.items(), key=lambda x: x[1], reverse=True):
            print(f"  - {brand}: {count} 条新闻")

        # Counts per topic.
        print("\n按主题统计:")
        topic_count = {}
        for news in ProcessedNews.query.all():
            topics = getattr(news, 'topics', [])
            if topics and isinstance(topics, list):
                for topic_info in topics:
                    # Topics may be stored as dicts ({'topic': name}) or
                    # plain strings; handle both formats.
                    if isinstance(topic_info, dict) and 'topic' in topic_info:
                        topic_name = topic_info['topic']
                        topic_count[topic_name] = topic_count.get(topic_name, 0) + 1
                    elif isinstance(topic_info, str):
                        topic_count[topic_info] = topic_count.get(topic_info, 0) + 1

        # Print the aggregated topic counts, most frequent first.
        for topic, count in sorted(topic_count.items(), key=lambda x: x[1], reverse=True):
            print(f"  - {topic}: {count} 条新闻")


def main():
    """
    Entry point: seed test data, process all pending items, print results.

    Returns:
        int: 0 on success, 1 when any pipeline step raised an exception.
    """
    logger.info("开始直接处理新闻流程...")

    try:
        populate_test_data()             # seed the test dataset
        process_all_unprocessed_news()   # run the AI pipeline
        display_results()                # print details and statistics
        logger.info("新闻处理流程完成")
    except Exception as e:
        logger.error(f"处理过程中发生错误: {e}")
        return 1

    return 0

# Script entry point: run the pipeline and propagate its exit code to the shell.
if __name__ == '__main__':
    sys.exit(main())
