#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
东方财富年报新闻爬取脚本
独立运行脚本，用于爬取和保存年报预期相关新闻数据
"""

import sys
import os
import argparse
from datetime import datetime
from loguru import logger

# Make the script's own directory importable so sibling parser modules can be
# found when this file is executed directly.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# Import the parser/model modules. Three strategies are tried in order:
#   1. relative imports  - running as part of the package
#   2. plain imports     - running as a script from this directory
#   3. `crawler.*` imports after putting the project root on sys.path
# The original code ran strategy 3 unconditionally inside strategy 2's
# except-block (even after strategy 2 succeeded) and silently swallowed the
# final failure with `pass`, letting the script continue and crash later with
# a NameError. Fixed: strategies chain properly and a total failure aborts.
try:
    # Strategy 1: running as a package module.
    from .PerformanceForecastParser import PerformanceForecastParser
    from .AssetRestructureParser import AssetRestructureParser
    from .SimpleNewsCollector import SimpleNewsCollector
    from model import DatabaseManager, NewsData
except Exception:
    try:
        # Strategy 2: running as a script, importing from the current directory.
        from PerformanceForecastParser import PerformanceForecastParser
        from AssetRestructureParser import AssetRestructureParser
        from SimpleNewsCollector import SimpleNewsCollector
        from model import DatabaseManager, NewsData
    except Exception:
        try:
            # Strategy 3: import by package name from the project root.
            sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
            from crawler.PerformanceForecastParser import PerformanceForecastParser
            from crawler.AssetRestructureParser import AssetRestructureParser
            from crawler.SimpleNewsCollector import SimpleNewsCollector
            from model import DatabaseManager, NewsData
        except Exception as e:
            # All strategies failed: report and abort instead of limping on.
            logger.error(f"导入失败: {str(e)}")
            sys.exit(1)

def setup_logger():
    """Attach a daily-rotating file sink (7-day retention) for crawler logs."""
    log_format = "{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}"
    sink_path = "logs/news_crawler_{time:YYYY-MM-DD}.log"
    logger.add(sink_path, rotation="1 day", retention="7 days", format=log_format)

def crawl_with_new_parsers(tags=None, max_pages=2):
    """Crawl news for each tag with the dedicated parsers.

    Args:
        tags: list of tag names to crawl; defaults to ['业绩预告', '资产重组'].
        max_pages: maximum number of pages fetched per tag/parser pair.
            Fix: the original accepted this argument but always fetched
            only page 1; it now pages from 1 through max_pages, stopping
            early when a page comes back empty.

    Returns:
        list[dict]: news items de-duplicated by title, in first-seen order.
    """
    import time  # hoisted out of the loop (was re-imported every iteration)

    if tags is None:
        tags = ['业绩预告', '资产重组']

    # Parsers are stateless fetchers; one instance each is enough.
    parsers = [
        PerformanceForecastParser(),
        AssetRestructureParser(),
    ]

    all_news = []
    for tag in tags:
        for parser in parsers:
            parser_name = parser.__class__.__name__
            for page in range(1, max_pages + 1):
                try:
                    logger.info(f"使用 {parser_name} 爬取标签 '{tag}' 的数据...")
                    news_list = parser.get_news_list(tag=tag, page=page)
                    if not news_list:
                        # Empty page: no more data for this tag/parser.
                        break
                    all_news.extend(news_list)
                    logger.info(f"  成功获取 {len(news_list)} 条数据")
                    # Throttle to avoid hammering the remote service.
                    time.sleep(1)
                except Exception as e:
                    logger.error(f"使用 {parser_name} 爬取标签 '{tag}' 失败: {str(e)}")
                    # Give up on this tag/parser pair, move to the next.
                    break

    # De-duplicate by title, keeping the first occurrence of each.
    unique_news = []
    seen_titles = set()
    for news in all_news:
        title = news.get('title', '')
        if title and title not in seen_titles:
            seen_titles.add(title)
            unique_news.append(news)

    logger.info(f"总共获取 {len(unique_news)} 条唯一新闻")
    return unique_news

def _normalize_news_item(n):
    """Map one raw crawled news dict onto the fields NewsData accepts.

    Merges 'stock_code' and 'stock_codes' into a de-duplicated JSON array
    string under 'stock_codes', and serializes 'keywords' to a JSON string
    (or plain str for scalar values).
    """
    import json as _json

    filtered = {
        'title': n.get('title'),
        'summary': (n.get('summary') or '') if n.get('summary') is not None else None,
        'content': (n.get('content') or '') if n.get('content') is not None else None,
        'url': n.get('url'),
        'source': n.get('source') or '东方财富',
        'author': (n.get('author') or '') if n.get('author') is not None else None,
        'publish_time': n.get('publish_time'),
        'crawl_time': n.get('crawl_time'),
        'category': n.get('category'),
        # keywords is stored as a JSON string; filled in below.
        'keywords': None,
    }

    # Collect single stock_code plus any stock_codes list into one JSON array.
    stock_codes = []
    if n.get('stock_code'):
        stock_codes.append(str(n.get('stock_code')))
    if n.get('stock_codes') and isinstance(n.get('stock_codes'), list):
        stock_codes.extend(str(x) for x in n.get('stock_codes'))
    if stock_codes:
        try:
            # dict.fromkeys de-duplicates while preserving order.
            filtered['stock_codes'] = _json.dumps(list(dict.fromkeys(stock_codes)))
        except Exception:
            pass  # best effort: drop unserializable stock codes

    kws = n.get('keywords')
    if kws is not None:
        try:
            if isinstance(kws, (list, dict)):
                filtered['keywords'] = _json.dumps(kws, ensure_ascii=False)
            else:
                filtered['keywords'] = str(kws)
        except Exception:
            filtered['keywords'] = None
    return filtered


def _preview_news(news_list):
    """Log a short preview (up to the first five items) of the crawl result."""
    logger.info("\n新闻预览（前5条）:")
    logger.info("-" * 80)
    for i, news in enumerate(news_list[:5], 1):
        logger.info(f"[{i}] {news.get('title', '无标题')}")
        logger.info(f"    来源: {news.get('source', '未知')} | 时间: {news.get('publish_time', '未知')}")
        if news.get('summary'):
            summary = news.get('summary', '')[:100]
            logger.info(f"    摘要: {summary}{'...' if len(news.get('summary', '')) > 100 else ''}")
        logger.info("")


def _save_to_database(news_list):
    """Persist crawled news into the database; failures are logged, not raised."""
    try:
        logger.info("初始化数据库...")
        db_manager = DatabaseManager()
        db_manager.init_databases()
        NewsData.set_db_manager(db_manager)

        logger.info("保存新闻到数据库...")
        filtered_list = [_normalize_news_item(n) for n in news_list]
        saved_count = NewsData.save_news_batch(filtered_list)
        logger.info(f"成功保存 {saved_count} 条新闻到数据库")
    except Exception as e:
        logger.error(f"保存到数据库失败: {str(e)}")


def _save_to_json(news_list, path, tags, max_pages, duration):
    """Write news plus crawl metadata to a JSON file; failures are logged."""
    try:
        import json

        logger.info(f"保存新闻到JSON文件: {path}")

        # Fix: os.makedirs('') raises FileNotFoundError when the target path
        # is a bare filename with no directory component.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        output_data = {
            'crawl_info': {
                'crawl_time': datetime.now().isoformat(),
                'tags': tags,
                'max_pages': max_pages,
                'total_count': len(news_list),
                'duration_seconds': duration
            },
            'news_list': news_list
        }

        with open(path, 'w', encoding='utf-8') as f:
            json.dump(output_data, f, ensure_ascii=False, indent=2)

        logger.info(f"成功保存到文件: {path}")
    except Exception as e:
        logger.error(f"保存到JSON文件失败: {str(e)}")


def _log_statistics(news_list, duration):
    """Log totals, top-10 source/category distributions and crawl timing."""
    logger.info("\n" + "=" * 60)
    logger.info("爬取统计信息:")
    logger.info("=" * 60)
    logger.info(f"总新闻数量: {len(news_list)}")

    # Frequency count by source, most common first.
    sources = {}
    for news in news_list:
        source = news.get('source', '未知来源')
        sources[source] = sources.get(source, 0) + 1

    logger.info("按来源分布:")
    for source, count in sorted(sources.items(), key=lambda x: x[1], reverse=True)[:10]:
        logger.info(f"  {source}: {count} 条")

    # Frequency count by category (skipping falsy categories).
    categories = {}
    for news in news_list:
        category = news.get('category', '未分类')
        if category:
            categories[category] = categories.get(category, 0) + 1

    if categories:
        logger.info("按分类分布:")
        for category, count in sorted(categories.items(), key=lambda x: x[1], reverse=True)[:10]:
            logger.info(f"  {category}: {count} 条")

    logger.info(f"爬取耗时: {duration:.1f} 秒")
    # Fix: guard against ZeroDivisionError when the crawl finishes instantly.
    if duration > 0:
        logger.info(f"平均速度: {len(news_list)/duration:.1f} 条/秒")


def main():
    """CLI entry point: parse arguments, crawl news, then persist and report."""
    parser = argparse.ArgumentParser(description='东方财富年报新闻爬取工具')
    parser.add_argument('--tags', type=str, default='业绩预告,资产重组',
                       help='爬取标签，用逗号分隔（默认：业绩预告,资产重组）')
    parser.add_argument('--max-pages', type=int, default=2,
                       help='每个标签最大爬取页数（默认：2）')
    parser.add_argument('--save-db', action='store_true', default=True,
                       help='保存到数据库（默认：True）')
    # Fix: with action='store_true' and default=True, --save-db alone could
    # never be disabled; this companion flag makes opting out possible while
    # keeping the old default and the old flag working.
    parser.add_argument('--no-save-db', dest='save_db', action='store_false',
                       help='不保存到数据库')
    parser.add_argument('--save-json', type=str, default='',
                       help='保存为JSON文件的路径（可选）')
    parser.add_argument('--verbose', '-v', action='store_true',
                       help='详细输出模式')

    args = parser.parse_args()

    # Non-verbose mode: replace all existing sinks with a plain INFO stdout sink.
    if not args.verbose:
        logger.remove()
        logger.add(sys.stdout, level="INFO")

    logger.info("=" * 60)
    logger.info("东方财富年报新闻爬取工具启动")
    logger.info("=" * 60)

    try:
        # Parse the comma-separated tag list, dropping empty entries.
        tags = [tag.strip() for tag in args.tags.split(',') if tag.strip()]
        logger.info(f"爬取标签: {tags}")
        logger.info(f"每个标签最大页数: {args.max_pages}")

        logger.info("使用新的解析器模块爬取新闻数据...")
        start_time = datetime.now()

        news_list = crawl_with_new_parsers(tags=tags, max_pages=args.max_pages)

        # Fall back to the simplified collector when the parsers return nothing.
        if not news_list:
            logger.info("新解析器未获取到数据，尝试使用简化版收集器...")
            collector = SimpleNewsCollector()
            news_list = collector.collect_financial_news(
                sources=['annual_report', 'general'],
                total_count=20
            )

        end_time = datetime.now()
        duration = (end_time - start_time).total_seconds()

        logger.info(f"爬取完成！共获取 {len(news_list)} 条新闻，耗时 {duration:.1f} 秒")

        if not news_list:
            logger.warning("未获取到任何新闻数据")
            return

        _preview_news(news_list)

        if args.save_db:
            _save_to_database(news_list)

        if args.save_json:
            _save_to_json(news_list, args.save_json, tags, args.max_pages, duration)

        _log_statistics(news_list, duration)

        logger.info("\n新闻爬取任务完成！")

    except KeyboardInterrupt:
        logger.warning("用户中断了爬取任务")
    except Exception as e:
        logger.error(f"爬取失败: {str(e)}")
        import traceback
        if args.verbose:
            logger.error(f"详细错误信息:\n{traceback.format_exc()}")
        sys.exit(1)

if __name__ == "__main__":
    # Ensure the log directory exists before the file sink is attached.
    os.makedirs("logs", exist_ok=True)
    
    # Attach the rotating file log sink.
    setup_logger()
    
    # Run the crawler CLI.
    main()