# -*- coding: utf-8 -*-
"""
主爬虫管理器
统一管理所有彩种的数据抓取工作
"""

import sys
import os
import time
from datetime import datetime, date
from typing import Dict, List, Optional
from loguru import logger
from concurrent.futures import ThreadPoolExecutor, as_completed

# 添加项目根目录到路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from spiders.ssq_spider import SSQSpider
from spiders.dlt_spider import DLTSpider
from spiders.fc3d_spider import FC3DSpider
from database import db_manager
from config.settings import LOTTERY_TYPES, CRAWLER_CONFIG


class MainCrawler:
    """Central manager that drives data crawling for every supported lottery.

    Holds one spider instance per lottery type and offers sequential and
    parallel crawl entry points, plus status inspection and cleanup of old
    crawler-task records.
    """

    # loguru sinks are process-global, so without a guard every
    # MainCrawler() instantiation would attach another file sink and
    # duplicate each log line. Set once per process.
    _log_sink_added = False

    def __init__(self):
        # Map of human-readable lottery name -> spider instance.
        self.spiders = {
            '双色球': SSQSpider(),
            '大乐透': DLTSpider(),
            '福彩3D': FC3DSpider()
        }

        # Configure logging (no-op after the first instance, see guard above).
        self.setup_logging()

    def setup_logging(self):
        """Attach a rotating, auto-expiring file sink for this crawler (once per process)."""
        if MainCrawler._log_sink_added:
            return

        log_dir = os.path.join(os.path.dirname(__file__), '..', 'logs')
        os.makedirs(log_dir, exist_ok=True)

        logger.add(
            os.path.join(log_dir, "main_crawler.log"),
            rotation="1 day",
            retention="30 days",
            format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {name}:{function}:{line} | {message}"
        )
        MainCrawler._log_sink_added = True

    def _crawl_all_sequential(self, crawl, label: str, default_delay: int) -> Dict[str, bool]:
        """Run *crawl* against every spider in turn, collecting per-lottery success.

        Shared loop behind crawl_latest_all / crawl_history_all.

        Args:
            crawl: callable taking a spider and returning a bool success flag.
            label: log-message fragment describing the job, spliced into the
                   original Chinese log strings (e.g. "最新开奖结果" or
                   " 2023年历史数据" — note the leading space for the latter).
            default_delay: fallback inter-request delay in seconds when
                   CRAWLER_CONFIG has no 'request_delay' entry.

        Returns:
            Mapping of lottery name -> True/False success.
        """
        results: Dict[str, bool] = {}

        for lottery_name, spider in self.spiders.items():
            try:
                logger.info(f"开始爬取{lottery_name}{label}")
                success = crawl(spider)
                results[lottery_name] = success

                if success:
                    logger.info(f"{lottery_name}{label}爬取成功")
                else:
                    logger.error(f"{lottery_name}{label}爬取失败")

                # Throttle between lotteries to avoid hitting the source too fast.
                # (Intentionally skipped when crawl raised, matching prior behavior.)
                time.sleep(CRAWLER_CONFIG.get('request_delay', default_delay))

            except Exception as e:
                logger.error(f"爬取{lottery_name}{label}异常: {e}")
                results[lottery_name] = False

        return results

    def crawl_latest_all(self) -> Dict[str, bool]:
        """Crawl the latest draw result for every lottery, sequentially.

        Returns:
            Mapping of lottery name -> success flag.
        """
        logger.info("开始爬取所有彩种最新开奖结果")

        results = self._crawl_all_sequential(
            lambda spider: spider.crawl_latest(), "最新开奖结果", 1)

        logger.info(f"所有彩种最新开奖结果爬取完成: {results}")
        return results

    def crawl_history_all(self, year: int) -> Dict[str, bool]:
        """Crawl one calendar year of historical draws for every lottery, sequentially.

        Args:
            year: calendar year to fetch.

        Returns:
            Mapping of lottery name -> success flag.
        """
        logger.info(f"开始爬取所有彩种{year}年历史数据")

        results = self._crawl_all_sequential(
            lambda spider: spider.crawl_history(year=year), f" {year}年历史数据", 2)

        logger.info(f"所有彩种{year}年历史数据爬取完成: {results}")
        return results

    def crawl_history_parallel(self, year: int, max_workers: int = 3) -> Dict[str, bool]:
        """Crawl one year of historical draws for all lotteries in parallel.

        Args:
            year: calendar year to fetch.
            max_workers: thread-pool size; one crawl job per spider.
                NOTE(review): assumes each spider's crawl_history is safe to run
                concurrently with the others — confirm spiders share no session.

        Returns:
            Mapping of lottery name -> success flag.
        """
        logger.info(f"开始并行爬取所有彩种{year}年历史数据")

        results: Dict[str, bool] = {}

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit one crawl job per spider; remember which future maps to
            # which lottery so results can be attributed on completion.
            future_to_lottery = {
                executor.submit(spider.crawl_history, year=year): lottery_name
                for lottery_name, spider in self.spiders.items()
            }

            # Collect results in completion order.
            for future in as_completed(future_to_lottery):
                lottery_name = future_to_lottery[future]
                try:
                    success = future.result()
                    results[lottery_name] = success

                    if success:
                        logger.info(f"{lottery_name} {year}年历史数据爬取成功")
                    else:
                        logger.error(f"{lottery_name} {year}年历史数据爬取失败")

                except Exception as e:
                    logger.error(f"爬取{lottery_name} {year}年历史数据异常: {e}")
                    results[lottery_name] = False

        logger.info(f"所有彩种{year}年历史数据并行爬取完成: {results}")
        return results

    def crawl_single_lottery(self, lottery_name: str, mode: str = 'latest', **kwargs) -> bool:
        """Crawl data for one lottery.

        Args:
            lottery_name: key into self.spiders ('双色球', '大乐透', '福彩3D').
            mode: 'latest' for the newest draw, 'history' for a range.
            **kwargs: for mode='history', either year=..., or both
                start_period=... and end_period=...

        Returns:
            True on success; False on failure, unknown lottery/mode, or
            missing history arguments.
        """
        if lottery_name not in self.spiders:
            logger.error(f"不支持的彩种: {lottery_name}")
            return False

        spider = self.spiders[lottery_name]

        try:
            if mode == 'latest':
                return spider.crawl_latest()
            elif mode == 'history':
                year = kwargs.get('year')
                start_period = kwargs.get('start_period')
                end_period = kwargs.get('end_period')

                # Explicit None checks so a falsy-but-present value is not
                # silently treated as missing.
                if year is not None:
                    return spider.crawl_history(year=year)
                elif start_period is not None and end_period is not None:
                    return spider.crawl_history(start_period=start_period, end_period=end_period)
                else:
                    logger.error("历史模式需要指定年份或期号范围")
                    return False
            else:
                logger.error(f"不支持的爬取模式: {mode}")
                return False

        except Exception as e:
            logger.error(f"爬取{lottery_name}数据失败: {e}")
            return False

    def get_crawler_status(self) -> Dict[str, Dict]:
        """Report recent crawler-task rows for each lottery from the database.

        Returns:
            Per-lottery dict with 'recent_tasks' (up to 5 newest rows) and
            'spider_available'; on query failure, 'error' carries the message.
        """
        status = {}

        for lottery_name in self.spiders.keys():
            try:
                # Fetch the five most recent task records for this lottery.
                sql = """
                SELECT task_name, task_type, status, success_count, error_message, 
                       created_at, updated_at
                FROM crawler_tasks 
                WHERE lottery_type = %s 
                ORDER BY created_at DESC 
                LIMIT 5
                """

                results = db_manager.execute_query(sql, (lottery_name,))

                status[lottery_name] = {
                    'recent_tasks': results if results else [],
                    'spider_available': True
                }

            except Exception as e:
                logger.error(f"获取{lottery_name}爬虫状态失败: {e}")
                status[lottery_name] = {
                    'recent_tasks': [],
                    'spider_available': False,
                    'error': str(e)
                }

        return status

    def cleanup_old_tasks(self, days: int = 30):
        """Delete crawler-task records older than *days* days (best-effort).

        Errors are logged and swallowed so cleanup never aborts a crawl run.
        """
        try:
            sql = """
            DELETE FROM crawler_tasks 
            WHERE created_at < DATE_SUB(NOW(), INTERVAL %s DAY)
            """

            affected_rows = db_manager.execute_sql(sql, (days,))
            logger.info(f"清理了{affected_rows}条{days}天前的爬虫任务记录")

        except Exception as e:
            logger.error(f"清理旧任务记录失败: {e}")

    def run_daily_crawl(self):
        """Daily job: crawl the latest results for all lotteries, then prune old task rows.

        Returns:
            Mapping of lottery name -> success flag from the latest-crawl step.
        """
        logger.info("开始运行日常爬取任务")

        # 1. Fetch the newest draw for every lottery.
        latest_results = self.crawl_latest_all()

        # 2. Prune stale task records (best-effort).
        self.cleanup_old_tasks()

        # 3. Summarize.
        success_count = sum(1 for success in latest_results.values() if success)
        total_count = len(latest_results)

        logger.info(f"日常爬取任务完成: {success_count}/{total_count} 个彩种成功")

        return latest_results

    def run_history_crawl(self, start_year: int = 2020, end_year: Optional[int] = None, parallel: bool = False):
        """Crawl historical data year by year over [start_year, end_year].

        Args:
            start_year: first year to crawl (inclusive).
            end_year: last year to crawl (inclusive); defaults to the current year.
            parallel: if True, run the per-year crawl across lotteries in parallel.

        Returns:
            Mapping of year -> (lottery name -> success flag).
        """
        if end_year is None:
            end_year = datetime.now().year

        logger.info(f"开始运行历史数据爬取任务: {start_year}-{end_year}年")

        all_results = {}

        for year in range(start_year, end_year + 1):
            logger.info(f"开始爬取{year}年历史数据")

            if parallel:
                year_results = self.crawl_history_parallel(year)
            else:
                year_results = self.crawl_history_all(year)

            all_results[year] = year_results

            # Pause between years to be gentle on the source site.
            if year < end_year:
                time.sleep(5)

        logger.info(f"历史数据爬取任务完成: {all_results}")
        return all_results

def main():
    """Command-line entry point: parse options, run the requested crawl, print a summary."""
    import argparse

    parser = argparse.ArgumentParser(description='彩票数据爬虫')
    parser.add_argument('--mode', choices=['latest', 'history', 'daily'], default='latest',
                        help='爬取模式: latest(最新), history(历史), daily(日常)')
    parser.add_argument('--lottery', choices=['双色球', '大乐透', '福彩3D', 'all'], default='all',
                        help='彩种选择')
    parser.add_argument('--year', type=int, help='年份(历史模式)')
    parser.add_argument('--start-year', type=int, default=2020, help='开始年份(历史模式)')
    parser.add_argument('--end-year', type=int, help='结束年份(历史模式)')
    parser.add_argument('--start-period', help='开始期号')
    parser.add_argument('--end-period', help='结束期号')
    parser.add_argument('--parallel', action='store_true', help='并行爬取')
    parser.add_argument('--status', action='store_true', help='查看爬虫状态')

    opts = parser.parse_args()
    manager = MainCrawler()

    # Status query is a standalone report; print it and stop.
    if opts.status:
        status_map = manager.get_crawler_status()
        print("\n=== 爬虫状态 ===")
        for name, info in status_map.items():
            print(f"\n{name}:")
            if not info['spider_available']:
                print(f"  状态: 不可用 - {info.get('error', '未知错误')}")
                continue
            print(f"  状态: 可用")
            tasks = info['recent_tasks']
            print(f"  最近任务数: {len(tasks)}")
            if tasks:
                newest = tasks[0]
                print(f"  最新任务: {newest['task_name']} - {newest['status']}")
        return

    if opts.mode == 'latest':
        if opts.lottery == 'all':
            outcome = manager.crawl_latest_all()
        else:
            outcome = {opts.lottery: manager.crawl_single_lottery(opts.lottery, 'latest')}

    elif opts.mode == 'history':
        if opts.lottery != 'all':
            # Single-lottery history needs either a year or a period range.
            extra = {}
            if opts.year:
                extra['year'] = opts.year
            elif opts.start_period and opts.end_period:
                extra['start_period'] = opts.start_period
                extra['end_period'] = opts.end_period
            else:
                print("历史模式需要指定年份或期号范围")
                return
            outcome = {opts.lottery: manager.crawl_single_lottery(opts.lottery, 'history', **extra)}
        elif opts.year:
            # All lotteries, one specific year.
            runner = manager.crawl_history_parallel if opts.parallel else manager.crawl_history_all
            outcome = runner(opts.year)
        else:
            # All lotteries over a year range.
            outcome = manager.run_history_crawl(
                start_year=opts.start_year,
                end_year=opts.end_year,
                parallel=opts.parallel
            )

    elif opts.mode == 'daily':
        outcome = manager.run_daily_crawl()

    else:
        print(f"不支持的模式: {opts.mode}")
        return

    # Final summary: per-lottery success/failure, or nested per-year dicts.
    print(f"\n=== 爬取结果 ===")
    if isinstance(outcome, dict):
        for key, value in outcome.items():
            if isinstance(value, dict):
                print(f"{key}: {value}")
            else:
                print(f"{key}: {'成功' if value else '失败'}")
    else:
        print(f"结果: {outcome}")


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()