# -*- coding: utf-8 -*-
"""
彩票爬虫管理器
统一管理所有彩种的爬虫
"""

import asyncio
import time
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from concurrent.futures import ThreadPoolExecutor, as_completed
from loguru import logger

from .spiders.ssq_spider import SSQSpider
from .spiders.dlt_spider import DLTSpider
from .spiders.fc3d_spider import FC3DSpider
from .spiders.pl3_spider import PL3Spider
from .spiders.pl5_spider import PL5Spider
from .spiders.qxc_spider import QXCSpider
from .spiders.qlc_spider import QLCSpider
from .spiders.kl8_spider import KL8Spider
from .database import db_manager
from .config.settings import LOTTERY_TYPES, CRAWLER_CONFIG


class CrawlerManager:
    """Coordinates the per-lottery spiders: dispatch, concurrency, status.

    Only lottery types flagged ``enabled`` in ``LOTTERY_TYPES`` get a spider.
    Crawl jobs run on a shared ThreadPoolExecutor sized by
    ``CRAWLER_CONFIG['max_concurrent']``; call :meth:`cleanup` when done.
    """

    def __init__(self):
        # Registry of every spider implementation shipped with the package.
        all_spiders = {
            'ssq': SSQSpider,
            'dlt': DLTSpider,
            'fc3d': FC3DSpider,
            'qlc': QLCSpider,     # Qi Le Cai (七乐彩)
            'pl3': PL3Spider,     # Pai Lie 3 (排列3)
            'pl5': PL5Spider,     # Pai Lie 5 (排列5)
            'qxc': QXCSpider,     # Qi Xing Cai (七星彩)
            'kl8': KL8Spider,     # Kuai Le 8 (快乐8)
        }

        # Keep only the lottery types enabled in configuration.
        self.spiders = {
            lottery_type: all_spiders[lottery_type]
            for lottery_type, config in LOTTERY_TYPES.items()
            if config.get('enabled', False) and lottery_type in all_spiders
        }

        self.executor = ThreadPoolExecutor(max_workers=CRAWLER_CONFIG['max_concurrent'])

    def get_spider(self, lottery_type: str):
        """Return a fresh spider instance for *lottery_type*.

        Raises:
            ValueError: if the lottery type is unknown or not enabled.
        """
        spider_class = self.spiders.get(lottery_type)
        if spider_class is None:
            raise ValueError(f"不支持的彩种: {lottery_type}")
        return spider_class()

    def crawl_single_lottery(self, lottery_type: str, mode: str = 'latest', **kwargs) -> Dict:
        """Crawl one lottery type and return a result summary dict.

        The dict always contains ``lottery_type``, ``mode``, ``saved_count``,
        ``duration``, ``success`` and ``timestamp``; on failure it also
        carries an ``error`` message. Never raises — all exceptions are
        converted into a failure result.

        Note: ``success`` is defined as "saved at least one record", so a run
        that found no new draws is reported as unsuccessful.
        """
        try:
            spider = self.get_spider(lottery_type)
            start_time = time.time()

            logger.info(f"开始爬取{lottery_type}数据，模式: {mode}")

            saved_count = spider.run(mode=mode, **kwargs)

            duration = time.time() - start_time

            result = {
                'lottery_type': lottery_type,
                'mode': mode,
                'saved_count': saved_count,
                'duration': duration,
                'success': saved_count > 0,
                'timestamp': datetime.now().isoformat()
            }

            logger.info(f"{lottery_type}爬取完成，保存{saved_count}条记录，耗时{duration:.2f}秒")

            return result

        except Exception as e:
            logger.error(f"{lottery_type}爬取失败: {e}")
            return {
                'lottery_type': lottery_type,
                'mode': mode,
                'saved_count': 0,
                'duration': 0,
                'success': False,
                'error': str(e),
                'timestamp': datetime.now().isoformat()
            }

    def crawl_multiple_lotteries(self, lottery_types: List[str], mode: str = 'latest', **kwargs) -> List[Dict]:
        """Crawl several lottery types concurrently on the shared executor.

        Returns one result dict per submitted task, in completion order.
        Even a task that dies outside crawl_single_lottery's own error
        handler yields a failure entry, so no requested lottery is silently
        dropped from the result list.
        """
        logger.info(f"开始并发爬取{len(lottery_types)}个彩种数据")

        # Map each future back to its lottery type so a raw executor-level
        # failure can still be attributed to the right lottery.
        future_map = {
            self.executor.submit(
                self.crawl_single_lottery,
                lottery_type,
                mode,
                **kwargs
            ): lottery_type
            for lottery_type in lottery_types
        }

        results = []
        for future in as_completed(future_map):
            try:
                results.append(future.result())
            except Exception as e:
                # crawl_single_lottery catches its own exceptions, so this
                # path only fires on executor/cancellation failures; record
                # a failure result instead of losing the entry.
                logger.error(f"并发爬取任务失败: {e}")
                results.append({
                    'lottery_type': future_map[future],
                    'mode': mode,
                    'saved_count': 0,
                    'duration': 0,
                    'success': False,
                    'error': str(e),
                    'timestamp': datetime.now().isoformat()
                })

        return results

    def crawl_all_latest(self, count: int = 30) -> List[Dict]:
        """Crawl the latest *count* draws for every enabled lottery type."""
        lottery_types = list(self.spiders.keys())
        return self.crawl_multiple_lotteries(
            lottery_types,
            mode='latest',
            count=count
        )

    def crawl_all_history(self, start_year: int = None, end_year: int = None) -> List[Dict]:
        """Crawl historical draws for every enabled lottery type.

        Defaults: *start_year* 2003 (earliest supported lottery launch year),
        *end_year* the current year.
        """
        if start_year is None:
            start_year = 2003
        if end_year is None:
            end_year = datetime.now().year

        lottery_types = list(self.spiders.keys())
        return self.crawl_multiple_lotteries(
            lottery_types,
            mode='history',
            start_year=start_year,
            end_year=end_year
        )

    def get_crawl_status(self) -> Dict:
        """Collect a status snapshot: connections, latest periods, row counts."""
        status = {
            'supported_lotteries': list(self.spiders.keys()),
            'database_status': db_manager.test_connection(),
            # NOTE(review): this is the same test_connection() call as above,
            # so Redis health is never actually probed — confirm whether
            # db_manager exposes a dedicated Redis check and use it here.
            'redis_status': db_manager.test_connection(),
            'latest_periods': {},
            'total_records': {}
        }

        # Per lottery: newest stored period plus total stored record count.
        for lottery_type in self.spiders.keys():
            try:
                latest_period = db_manager.get_latest_period(lottery_type)
                status['latest_periods'][lottery_type] = latest_period

                query = "SELECT COUNT(*) as count FROM lottery_results WHERE lottery_type = %s"
                result = db_manager.execute_query(query, (lottery_type,))
                count = result[0]['count'] if result else 0
                status['total_records'][lottery_type] = count

            except Exception as e:
                # Keep the snapshot usable even if one lottery's lookup fails.
                logger.error(f"获取{lottery_type}状态失败: {e}")
                status['latest_periods'][lottery_type] = None
                status['total_records'][lottery_type] = 0

        return status

    def schedule_crawl(self, lottery_types: List[str] = None, interval_hours: int = 1):
        """Blocking loop: crawl the latest draws every *interval_hours* hours.

        Runs until KeyboardInterrupt; any other exception is logged and the
        loop retries after a 5-minute backoff.
        """
        if lottery_types is None:
            lottery_types = list(self.spiders.keys())

        logger.info(f"启动定时爬取任务，彩种: {lottery_types}，间隔: {interval_hours}小时")

        while True:
            try:
                results = self.crawl_multiple_lotteries(lottery_types, mode='latest', count=5)

                success_count = sum(1 for r in results if r['success'])
                logger.info(f"定时爬取完成，成功: {success_count}/{len(results)}")

                time.sleep(interval_hours * 3600)

            except KeyboardInterrupt:
                logger.info("定时爬取任务被中断")
                break
            except Exception as e:
                logger.error(f"定时爬取任务异常: {e}")
                time.sleep(300)  # back off 5 minutes before retrying

    def cleanup(self):
        """Shut down the thread pool, waiting for in-flight tasks."""
        self.executor.shutdown(wait=True)
        logger.info("爬虫管理器资源清理完成")


def main():
    """CLI entry point: parse arguments and dispatch to CrawlerManager.

    Modes:
        latest        -- fetch newest draws (1 per lottery unless --count given)
        history       -- fetch historical draws for a year range
        history-smart -- backfill history from each spider's earliest period
        schedule      -- loop forever, crawling every --interval hours
        status        -- print connection status and per-lottery record counts
    """
    import argparse

    parser = argparse.ArgumentParser(description='彩票数据爬虫')
    parser.add_argument('--mode', choices=['latest', 'history', 'history-smart', 'schedule', 'status'],
                       default='latest', help='运行模式')
    parser.add_argument('--lottery', nargs='+', help='指定彩种 (ssq, dlt)')
    # default=None distinguishes "flag omitted" from an explicit value; the
    # old sentinel default of 30 silently ignored an explicit `--count 30`.
    parser.add_argument('--count', type=int, default=None, help='爬取最新数据的期数')
    parser.add_argument('--start-year', type=int, help='历史数据开始年份')
    parser.add_argument('--end-year', type=int, help='历史数据结束年份')
    parser.add_argument('--interval', type=int, default=1, help='定时爬取间隔(小时)')

    args = parser.parse_args()

    # Daily-rotated file log, retained for 30 days.
    logger.add(
        "logs/crawler_{time:YYYY-MM-DD}.log",
        rotation="1 day",
        retention="30 days",
        level="INFO"
    )

    manager = CrawlerManager()

    try:
        if args.mode == 'status':
            # Report connection health plus per-lottery latest period / totals.
            status = manager.get_crawl_status()
            print("\n=== 爬虫状态 ===")
            print(f"支持的彩种: {', '.join(status['supported_lotteries'])}")
            print(f"数据库连接: {'正常' if status['database_status'] else '异常'}")
            print(f"Redis连接: {'正常' if status['redis_status'] else '异常'}")
            print("\n最新期号:")
            for lottery_type, period in status['latest_periods'].items():
                count = status['total_records'][lottery_type]
                print(f"  {lottery_type}: {period} (共{count}条记录)")

        elif args.mode == 'latest':
            lottery_types = args.lottery or list(manager.spiders.keys())

            if args.count is None:
                # --count omitted: fetch the single newest draw per lottery
                # sequentially (count=1 uses the query endpoint for one record).
                results = []
                for lottery_type in lottery_types:
                    result = manager.crawl_single_lottery(
                        lottery_type,
                        mode='latest',
                        count=1
                    )
                    results.append(result)
            else:
                # Explicit --count: crawl that many draws, concurrently.
                results = manager.crawl_multiple_lotteries(
                    lottery_types,
                    mode='latest',
                    count=args.count
                )

            print("\n=== 爬取结果 ===")
            for result in results:
                status = '成功' if result['success'] else '失败'
                print(f"{result['lottery_type']}: {status}, 保存{result['saved_count']}条")

        elif args.mode == 'history':
            # Year-range history crawl; CrawlerManager fills default years.
            lottery_types = args.lottery or list(manager.spiders.keys())
            results = manager.crawl_multiple_lotteries(
                lottery_types,
                mode='history',
                start_year=args.start_year,
                end_year=args.end_year
            )

            print("\n=== 历史数据爬取结果 ===")
            for result in results:
                status = '成功' if result['success'] else '失败'
                print(f"{result['lottery_type']}: {status}, 保存{result['saved_count']}条")

        elif args.mode == 'history-smart':
            # Smart backfill: each spider extends history backwards from its
            # earliest stored period, if it implements crawl_history_smart.
            lottery_types = args.lottery or list(manager.spiders.keys())

            print(f"开始智能抓取历史数据: {', '.join(lottery_types)}")
            total_crawled = 0  # running sum; no intermediate list needed

            for lottery_type in lottery_types:
                if lottery_type in manager.spiders:
                    try:
                        # Context manager ensures spider resources are released.
                        with manager.get_spider(lottery_type) as spider:
                            if hasattr(spider, 'crawl_history_smart'):
                                config_count = LOTTERY_TYPES.get(lottery_type, {}).get('daily_fetch_count', 30)
                                count = args.count if args.count is not None else config_count
                                print(f"开始抓取 {lottery_type}，count={count}")
                                result = spider.crawl_history_smart(count)
                                total_crawled += result
                                print(f"  {lottery_type}: 智能抓取历史数据 {result} 条")
                            else:
                                print(f"  {lottery_type}: 不支持智能历史数据抓取")
                    except Exception as e:
                        logger.error(f"抓取 {lottery_type} 失败: {e}")
                        print(f"  {lottery_type}: 抓取失败 - {e}")
                else:
                    print(f"  {lottery_type}: 爬虫未找到")

            # was "\\n", which printed a literal backslash-n
            print(f"\n智能历史数据抓取完成，总计: {total_crawled} 条")

        elif args.mode == 'schedule':
            # Blocking periodic crawl until interrupted.
            lottery_types = args.lottery or list(manager.spiders.keys())
            manager.schedule_crawl(lottery_types, args.interval)

    except KeyboardInterrupt:
        logger.info("程序被用户中断")
    except Exception as e:
        logger.error(f"程序运行异常: {e}")
    finally:
        # Always release the manager's thread pool.
        manager.cleanup()


# Script entry point: run the CLI when executed directly (not on import).
if __name__ == '__main__':
    main()