# -*- coding: utf-8 -*-

import argparse
import logging
import os
import sys
from pathlib import Path
from typing import Optional

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings


# Add the project directory to Python's import path so sibling modules resolve.
project_dir = Path(__file__).parent
sys.path.insert(0, str(project_dir))

from config import ConfigManager, SpiderConfig
from fire_control_spider.sites.beijingfire import BeijingFireSiteSpider
from fire_control_spider.sites.londonfire import LondonFireSiteSpider
from fire_control_spider.sites.cccf import CCCFSiteSpider
from fire_control_spider.sites.nist import NISTSiteSpider
from fire_control_spider.sites.iaff import IAFFSiteSpider
from fire_control_spider.sites.sfpe import SFPESiteSpider
from fire_control_spider.sites.nfpa import NFPASiteSpider


class SpiderManager:
    """Manage spider configuration, execution, and output-directory housekeeping.

    Responsibilities:
      * hold the registry of runnable spider classes,
      * configure process-wide logging (silence framework noise, keep project logs),
      * launch blocking Scrapy crawler processes from stored configuration,
      * create and clean the per-site output directory tree.
    """

    def __init__(self):
        self.config_manager = ConfigManager()
        self.logger = self._setup_logger()

        # Registry of runnable spider classes — modular site architecture only.
        self.spider_classes = {
            'beijingfire': BeijingFireSiteSpider,  # Beijing Fire Protection Association
            'londonfire': LondonFireSiteSpider,    # London fire site
            'cccf': CCCFSiteSpider,                # CCCF site
            'nist': NISTSiteSpider,                # NIST site
            'iaff': IAFFSiteSpider,                # IAFF site
            'sfpe': SFPESiteSpider,                # SFPE site
            'nfpa': NFPASiteSpider,                # NFPA site
        }

    def _setup_logger(self):
        """Configure process-wide logging and return this manager's logger.

        Framework and third-party loggers are silenced (CRITICAL, no
        propagation); the project's own ``fire_control_spider`` loggers stay
        at INFO.  Root handlers write to the configured log file and stdout.

        Bug fix: the original called ``basicConfig(level=CRITICAL, ...)``
        last, which reset the root logger to CRITICAL and silently swallowed
        every ``self.logger.info``/``error`` message this class emits, despite
        the surrounding comments stating INFO output was intended.
        """
        # Silence the noisy framework / third-party loggers entirely.
        noisy_loggers = [
            'scrapy', 'scrapy.extensions', 'scrapy.middleware', 'scrapy.crawler',
            'scrapy.core', 'scrapy.core.engine', 'scrapy.core.scraper', 'scrapy.utils',
            'scrapy.statscollectors', 'scrapy.extensions.httpcache', 'scrapy.extensions.logstats',
            'scrapy.extensions.corestats', 'scrapy.extensions.memusage',
            'scrapy.extensions.memdebug', 'scrapy.extensions.closespider',
            'scrapy.extensions.feedexport', 'urllib3', 'twisted', 'chardet'
        ]
        for logger_name in noisy_loggers:
            framework_logger = logging.getLogger(logger_name)
            framework_logger.setLevel(logging.CRITICAL)
            framework_logger.propagate = False

        # Keep the project's own modules at INFO.
        logging.getLogger('fire_control_spider').setLevel(logging.INFO)

        # Root handlers: configured log file + stdout, at INFO so the
        # manager's own messages are actually emitted.
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
            handlers=[
                logging.FileHandler(self.config_manager.global_config.log_file, encoding='utf-8'),
                logging.StreamHandler(sys.stdout)
            ]
        )
        return logging.getLogger(__name__)

    def list_spiders(self):
        """Print every stored spider configuration to stdout."""
        print("可用的爬虫:")
        print("=" * 50)

        for config in self.config_manager.spider_configs:
            print(f"名称: {config.name}")
            print(f"站点: {config.site_name}")
            print(f"并发数: {config.concurrent_requests}")
            print(f"延迟: {config.download_delay}秒")
            print("-" * 30)

    def run_spider(self, spider_name: str, clean_outputs=False, **kwargs):
        """Run one configured spider in a blocking CrawlerProcess.

        Args:
            spider_name: key into both the config store and the class registry.
            clean_outputs: wipe the site's output directories before crawling.
            **kwargs: Scrapy setting overrides (applied when the key already
                exists in the settings); also forwarded to the spider
                constructor, preserving the original behavior.

        Returns:
            True on a clean run, False on any failure.
        """
        spider_config = self.config_manager.get_spider_config(spider_name)
        if not spider_config:
            self.logger.error(f"未找到爬虫配置: {spider_name}")
            return False

        spider_class = self.spider_classes.get(spider_name)
        if not spider_class:
            self.logger.error(f"未找到爬虫类: {spider_name}")
            return False

        if clean_outputs:
            self.logger.info(f"开始清理站点 {spider_config.site_name} 的输出目录...")
            self.clean_outputs(spider_config.site_name)

        # Base settings from config, then spider-specific throttling values.
        settings = self.config_manager.get_scrapy_settings()
        settings.update({
            'CONCURRENT_REQUESTS': spider_config.concurrent_requests,
            'DOWNLOAD_DELAY': spider_config.download_delay,
        })

        # Command-line overrides win over stored configuration.
        for key, value in kwargs.items():
            if key in settings:
                settings[key] = value

        try:
            process = CrawlerProcess(settings)
            process.crawl(
                spider_class,
                site_name=spider_config.site_name,
                allowed_domains=spider_config.allowed_domains,
                **kwargs
            )
            self.logger.info(f"开始运行爬虫: {spider_name}")
            process.start()  # blocks until the crawl finishes
            return True
        except Exception as e:
            self.logger.error(f"运行爬虫失败: {e}")
            return False

    def run_multiple_spiders(self, spider_names: list, clean_outputs=False, **kwargs):
        """Run several spiders in parallel, one child process per spider.

        NOTE(review): ``self.run_spider`` (and therefore this manager) must be
        picklable for ProcessPoolExecutor — confirm ConfigManager supports it.
        """
        from concurrent.futures import ProcessPoolExecutor

        # Clean once up-front; child processes skip cleaning (clean_outputs=False
        # below) so they do not race each other deleting directories.
        if clean_outputs:
            self.logger.info("开始清理所有站点的输出目录...")
            self.clean_outputs()

        self.logger.info(f"准备并行运行 {len(spider_names)} 个爬虫")

        with ProcessPoolExecutor(max_workers=len(spider_names)) as executor:
            futures = [
                (name, executor.submit(self.run_spider, name, clean_outputs=False, **kwargs))
                for name in spider_names
            ]

            # Wait for every spider and report per-spider outcome.
            for spider_name, future in futures:
                try:
                    if future.result():
                        self.logger.info(f"爬虫 {spider_name} 运行成功")
                    else:
                        self.logger.error(f"爬虫 {spider_name} 运行失败")
                except Exception as e:
                    self.logger.error(f"爬虫 {spider_name} 运行异常: {e}")

    def add_spider_config(self, name: str, site_name: str, **kwargs):
        """Create and persist a new spider configuration.

        Extra keyword arguments are passed straight to SpiderConfig
        (e.g. allowed_domains, concurrent_requests, download_delay).
        """
        config = SpiderConfig(
            name=name,
            site_name=site_name,
            **kwargs
        )

        self.config_manager.add_spider_config(config)
        self.logger.info(f"已添加爬虫配置: {name}")

    def remove_spider_config(self, name: str):
        """Delete the named spider configuration from the config store."""
        self.config_manager.remove_spider_config(name)
        self.logger.info(f"已删除爬虫配置: {name}")

    def create_output_structure(self):
        """Create the per-site output directory tree for every configured spider."""
        output_dir = Path(self.config_manager.global_config.output_dir)
        output_dir.mkdir(exist_ok=True)

        for config in self.config_manager.spider_configs:
            site_dir = output_dir / config.site_name
            dirs_to_create = [
                site_dir,
                site_dir / "image",
                site_dir / "video",
                site_dir / "audio",
                site_dir / "main_file",
                site_dir / "attachment_file",
                site_dir / "image_urls",
                site_dir / "video_urls",
                site_dir / "audio_urls",
                site_dir / "jsonl"
            ]

            for directory in dirs_to_create:
                directory.mkdir(parents=True, exist_ok=True)

        self.logger.info("输出目录结构创建完成")

    def clean_outputs(self, site_name: Optional[str] = None):
        """Remove previously-crawled data from the outputs directory.

        Args:
            site_name: clean only this site's directory; None cleans every
                site directory under the configured output root.
        """
        output_dir = Path(self.config_manager.global_config.output_dir)

        if not output_dir.exists():
            self.logger.info("outputs目录不存在，无需清理")
            return

        cleaned_count = 0
        total_size = 0

        if site_name:
            # Clean only the requested site's directory.
            site_dir = output_dir / site_name
            if site_dir.exists() and site_dir.is_dir():
                self.logger.info(f"清理站点目录: {site_name}")
                cleaned_count, total_size = self._clean_site_directory(site_dir)
            else:
                self.logger.info(f"站点目录不存在: {site_name}")
        else:
            # Clean every site directory under the output root.
            for site_dir in output_dir.iterdir():
                if not site_dir.is_dir():
                    continue

                self.logger.info(f"清理站点目录: {site_dir.name}")
                site_cleaned_count, site_total_size = self._clean_site_directory(site_dir)
                cleaned_count += site_cleaned_count
                total_size += site_total_size

        def format_size(size_bytes):
            """Render a byte count as a human-readable string (e.g. '1.5MB')."""
            if size_bytes == 0:
                return "0B"
            size_names = ["B", "KB", "MB", "GB"]
            i = 0
            while size_bytes >= 1024 and i < len(size_names) - 1:
                size_bytes /= 1024.0
                i += 1
            return f"{size_bytes:.1f}{size_names[i]}"

        if site_name:
            self.logger.info(f"清理站点 {site_name} 完成: 删除了 {cleaned_count} 个文件，释放了 {format_size(total_size)} 空间")
        else:
            self.logger.info(f"清理完成: 删除了 {cleaned_count} 个文件，释放了 {format_size(total_size)} 空间")

    def _clean_site_directory(self, site_dir: Path) -> tuple:
        """Delete the known sub-directories of a single site directory.

        Returns:
            (files_removed, bytes_freed)
        """
        cleaned_count = 0
        total_size = 0

        subdirs = ['image', 'video', 'audio', 'main_file', 'attachment_file',
                   'video_urls', 'audio_urls', 'image_urls', 'jsonl']
        for subdir_name in subdirs:
            subdir = site_dir / subdir_name
            if not subdir.exists():
                continue

            # Remove deepest entries first so every directory is empty by the
            # time rmdir() reaches it.  (The original unlinked only files and
            # then called subdir.rmdir(), which raised OSError whenever the
            # subdirectory contained nested folders.)
            for entry in sorted(subdir.rglob('*'),
                                key=lambda p: len(p.parts), reverse=True):
                if entry.is_file():
                    total_size += entry.stat().st_size
                    entry.unlink()
                    cleaned_count += 1
                elif entry.is_dir():
                    entry.rmdir()

            subdir.rmdir()

        # Drop the site directory itself once it is completely empty.
        if site_dir.exists() and not any(site_dir.iterdir()):
            site_dir.rmdir()
            self.logger.info(f"删除空站点目录: {site_dir.name}")

        return cleaned_count, total_size


def main():
    """CLI entry point: parse arguments and dispatch to SpiderManager.

    Commands: list (show configs), run (one spider or --all), add/remove
    (manage configs), init (create the output directory tree).
    Exits with status 1 on a failed run or missing required arguments.
    """
    parser = argparse.ArgumentParser(description='Fire Control Spider - 多站点爬虫系统')
    parser.add_argument('command', choices=['list', 'run', 'add', 'remove', 'init'],
                       help='命令类型')

    # Arguments for running spiders.
    parser.add_argument('--spider', '-s', help='爬虫名称')
    parser.add_argument('--all', action='store_true', help='运行所有爬虫')
    parser.add_argument('--concurrent', type=int, help='并发请求数')
    parser.add_argument('--delay', type=float, help='下载延迟（秒）')
    parser.add_argument('--clean-outputs', action='store_true', help='清理outputs目录中的旧数据')

    # Arguments for adding a spider configuration.
    parser.add_argument('--name', help='爬虫名称')
    parser.add_argument('--site-name', help='站点名称')
    parser.add_argument('--allowed-domains', nargs='+', help='允许的域名')

    args = parser.parse_args()

    manager = SpiderManager()

    if args.command == 'list':
        manager.list_spiders()

    elif args.command == 'run':
        # Collect setting overrides.  Compare against None (not truthiness):
        # the original `if args.delay:` silently ignored an explicit --delay 0.
        kwargs = {}
        if args.concurrent is not None:
            kwargs['CONCURRENT_REQUESTS'] = args.concurrent
        if args.delay is not None:
            kwargs['DOWNLOAD_DELAY'] = args.delay

        if args.all:
            # Run every configured spider in parallel.
            spider_names = manager.config_manager.list_spider_configs()
            if spider_names:
                manager.run_multiple_spiders(spider_names, clean_outputs=args.clean_outputs, **kwargs)
            else:
                print("没有找到可运行的爬虫配置")
        elif args.spider:
            # Run a single named spider.
            success = manager.run_spider(args.spider, clean_outputs=args.clean_outputs, **kwargs)
            if not success:
                sys.exit(1)
        else:
            print("请指定要运行的爬虫名称或使用 --all 参数")

    elif args.command == 'add':
        # Both --name and --site-name are required for 'add'.
        if not all([args.name, args.site_name]):
            print("添加爬虫配置需要指定 --name, --site-name 参数")
            sys.exit(1)

        kwargs = {}
        if args.allowed_domains:
            kwargs['allowed_domains'] = args.allowed_domains
        if args.concurrent is not None:
            kwargs['concurrent_requests'] = args.concurrent
        if args.delay is not None:
            kwargs['download_delay'] = args.delay

        manager.add_spider_config(args.name, args.site_name, **kwargs)

    elif args.command == 'remove':
        if not args.name:
            print("删除爬虫配置需要指定 --name 参数")
            sys.exit(1)

        manager.remove_spider_config(args.name)

    elif args.command == 'init':
        manager.create_output_structure()
        print("输出目录结构初始化完成")


# Run the CLI only when this file is executed directly (not when imported).
if __name__ == '__main__':
    main()
