#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
云盘资源爬取系统主程序

支持功能：
1. 分享码导入（单个、批量、文件导入）
2. 资源自动爬取
3. 定时任务
4. 统计信息查看
5. 失败重试
"""

import argparse
import logging
import sys
import os
import signal
import time
from typing import List, Optional
from datetime import datetime

# 添加当前目录到Python路径
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from .database import init_database, DatabaseManager
from .models import CloudType, ShareStatus
from .importer import ShareCodeImporter
from .crawler import CloudResourceCrawler

class CloudCrawlerApp:
    """Main application class for the cloud-drive crawler.

    Wires together the database, the share-code importer, the crawler and
    the scheduler, and exposes the operations that back each CLI command.
    Components are created in initialize(); until then they are None.
    """

    def __init__(self):
        # Forward-reference strings are used in the annotations so that
        # constructing the app never evaluates names at runtime.
        # (The original annotated with the bare name CrawlerScheduler,
        # which is never imported in this module, so `self.scheduler:
        # Optional[CrawlerScheduler]` raised NameError in __init__.)
        self.db: Optional["DatabaseManager"] = None
        self.importer: Optional["ShareCodeImporter"] = None
        self.crawler: Optional["CloudResourceCrawler"] = None
        # NOTE(review): CrawlerScheduler is instantiated in initialize()
        # but there is no import for it in this module — confirm the
        # import (e.g. `from .scheduler import CrawlerScheduler`) exists.
        self.scheduler: Optional["CrawlerScheduler"] = None
        self.logger = logging.getLogger(__name__)

        # Install handlers so Ctrl+C / SIGTERM shut the app down cleanly.
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)

    def _signal_handler(self, signum, frame):
        """Handle SIGINT/SIGTERM: log, shut down gracefully, then exit 0."""
        self.logger.info(f"接收到信号 {signum}，正在优雅关闭...")
        self.shutdown()
        sys.exit(0)

    def initialize(self) -> bool:
        """Initialize the database connection and all components.

        Returns:
            True on success, False if anything failed (errors are logged,
            never raised to the caller).
        """
        try:
            # BUGFIX: the original called init_global_session(), which is
            # not defined or imported anywhere; init_database (imported at
            # the top of this module and previously unused) is the
            # intended initializer.
            self.db = init_database()
            if not self.db:
                self.logger.error("数据库初始化失败")
                return False

            # Build the worker components on top of the database handle.
            self.importer = ShareCodeImporter(self.db)
            self.crawler = CloudResourceCrawler(self.db, max_workers=3, delay_between_requests=1.0)
            self.scheduler = CrawlerScheduler(self.crawler)

            self.logger.info("应用初始化成功")
            return True

        except Exception as e:
            self.logger.error(f"应用初始化失败: {str(e)}")
            return False

    def shutdown(self):
        """Shut down the application: stop scheduler/crawler, close the DB.

        Safe to call even if initialize() never ran or partially failed;
        each component is only touched when it exists.
        """
        try:
            if self.scheduler:
                self.scheduler.stop_scheduled_crawl()

            if self.crawler:
                self.crawler.stop_auto_crawl()

            if self.db:
                self.db.disconnect()

            self.logger.info("应用已关闭")

        except Exception as e:
            self.logger.error(f"关闭应用时发生错误: {str(e)}")

    def import_single_share(self, clouds_type: str, share_code: str, access_code: str = "",
                           share_name: str = "", full_url: str = "", force_update: bool = False):
        """Import a single share code and print the result summary.

        Args:
            clouds_type: Cloud-drive type (one of the CloudType values).
            share_code: The share code to import.
            access_code: Optional access password for the share.
            share_name: Optional human-readable name.
            full_url: Optional full share URL.
            force_update: Overwrite an existing record with the same code.
        """
        print(f"正在导入分享码: {share_code}")

        result = self.importer.import_single(
            clouds_type=clouds_type,
            share_code=share_code,
            access_code=access_code,
            share_name=share_name,
            full_url=full_url,
            force_update=force_update
        )

        print(f"导入结果: {result.get_summary()}")

        if result.errors:
            print("错误信息:")
            for error in result.errors:
                print(f"  - {error}")

    def import_from_file(self, file_path: str, force_update: bool = False,
                         sheet_name: Optional[str] = None):
        """Import share codes from a file, dispatching on its extension.

        Supported: .csv, .json, .xlsx/.xls (optionally a named sheet),
        and .txt (parsed as free text; cloud type defaults to TIANYI).
        Prints a summary and at most the first 10 errors.
        """
        print(f"正在从文件导入: {file_path}")

        if not os.path.exists(file_path):
            print(f"文件不存在: {file_path}")
            return

        file_ext = os.path.splitext(file_path)[1].lower()

        if file_ext == '.csv':
            result = self.importer.import_from_csv(file_path, force_update)
        elif file_ext == '.json':
            result = self.importer.import_from_json(file_path, force_update)
        elif file_ext in ['.xlsx', '.xls']:
            result = self.importer.import_from_excel(file_path, force_update, sheet_name)
        elif file_ext == '.txt':
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
            result = self.importer.import_from_text(content, CloudType.TIANYI.value, force_update)
        else:
            print(f"不支持的文件格式: {file_ext}，支持的格式: .csv, .json, .xlsx, .xls, .txt")
            return

        print(f"导入结果: {result.get_summary()}")

        if result.errors:
            print("错误信息:")
            for error in result.errors[:10]:  # show only the first 10 errors
                print(f"  - {error}")
            if len(result.errors) > 10:
                print(f"  ... 还有 {len(result.errors) - 10} 个错误")

    def crawl_pending(self, clouds_type: Optional[str] = None, limit: int = 100, depth: int = 3):
        """Crawl resources that are still pending, up to `limit` items.

        Args:
            clouds_type: Restrict to one cloud type; None means all types.
            limit: Maximum number of resources to process.
            depth: Directory traversal depth.
        """
        print(f"正在爬取待处理资源，限制: {limit}，深度: {depth}")

        result = self.crawler.crawl_pending_resources(
            clouds_type=clouds_type,
            limit=limit,
            depth=depth,
            use_threading=True
        )

        print(f"爬取结果: {result.get_summary()}")

        if result.errors:
            print("错误信息:")
            for error in result.errors[:5]:  # show only the first 5 errors
                print(f"  - {error}")
            if len(result.errors) > 5:
                print(f"  ... 还有 {len(result.errors) - 5} 个错误")

    def crawl_by_codes(self, share_codes: List[str], clouds_type: str = CloudType.TIANYI.value, depth: int = 3):
        """Crawl an explicit list of share codes and print the outcome."""
        print(f"正在爬取指定分享码: {', '.join(share_codes)}")

        result = self.crawler.crawl_by_share_codes(
            share_codes=share_codes,
            clouds_type=clouds_type,
            depth=depth,
            use_threading=True
        )

        print(f"爬取结果: {result.get_summary()}")

        if result.errors:
            print("错误信息:")
            for error in result.errors:
                print(f"  - {error}")

    def retry_failed(self, limit: int = 50, depth: int = 3):
        """Retry up to `limit` previously failed resources."""
        print(f"正在重试失败资源，限制: {limit}")

        result = self.crawler.retry_failed_resources(limit=limit, depth=depth)

        print(f"重试结果: {result.get_summary()}")

        if result.errors:
            print("错误信息:")
            for error in result.errors:
                print(f"  - {error}")

    def show_statistics(self):
        """Print import and crawl statistics to stdout."""
        print("=== 导入统计 ===")
        import_stats = self.importer.get_import_statistics()
        if import_stats:
            print(f"总资源数: {import_stats.get('total', 0)}")
            print(f"已处理: {import_stats.get('processed', 0)}")
            print(f"未处理: {import_stats.get('unprocessed', 0)}")

            print("\n按云盘类型:")
            for cloud_type, count in import_stats.get('by_type', {}).items():
                print(f"  {cloud_type}: {count}")

            print("\n按状态:")
            for status, count in import_stats.get('by_status', {}).items():
                print(f"  {status}: {count}")

        print("\n=== 爬取统计 ===")
        crawl_stats = self.crawler.get_crawl_statistics()
        if crawl_stats:
            print(f"处理进度: {crawl_stats.get('processing_rate', 0)}%")

            print("\n按云盘类型处理情况:")
            for cloud_type, stats in crawl_stats.get('by_type', {}).items():
                print(f"  {cloud_type}: 已处理 {stats['processed']}, 未处理 {stats['unprocessed']}, 总计 {stats['total']}")

    def start_daemon(self, interval_minutes: int = 30, batch_size: int = 20):
        """Run in daemon mode: scheduled crawling until interrupted.

        Blocks the calling thread, polling once per second, until the
        scheduler stops on its own or the user presses Ctrl+C; the
        scheduler is always stopped on the way out.
        """
        print(f"启动守护进程模式，间隔: {interval_minutes}分钟，批次大小: {batch_size}")

        self.scheduler.start_scheduled_crawl(interval_minutes, batch_size)

        try:
            print("守护进程已启动，按 Ctrl+C 停止")
            while True:
                time.sleep(1)
                if not self.scheduler.is_running():
                    break
        except KeyboardInterrupt:
            print("\n正在停止守护进程...")
        finally:
            self.scheduler.stop_scheduled_crawl()
            print("守护进程已停止")

def setup_logging(log_level: str = "INFO", log_file: Optional[str] = None) -> None:
    """Configure root logging for the application.

    Args:
        log_level: Level name (DEBUG/INFO/WARNING/ERROR, case-insensitive);
            unknown names fall back to INFO.
        log_file: Optional path; when given, log records are also written
            to this file (UTF-8) in addition to stdout.
    """
    level = getattr(logging, log_level.upper(), logging.INFO)

    # One shared formatter for console and file output.
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    root_logger = logging.getLogger()
    root_logger.setLevel(level)

    # Drop (and close) any pre-existing handlers so that calling this
    # twice neither duplicates output nor leaks an open log-file handle.
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)
        handler.close()

    # Console handler (stdout, not the default stderr).
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)

    # Optional file handler.
    if log_file:
        file_handler = logging.FileHandler(log_file, encoding='utf-8')
        file_handler.setFormatter(formatter)
        root_logger.addHandler(file_handler)

def create_parser() -> argparse.ArgumentParser:
    """Build and return the CLI argument parser with all subcommands."""
    usage_examples = """
示例用法:
  # 导入单个分享码
  python main.py import-single --type 天翼云 --code ABC123 --access-code 1234 --name "测试分享"
  
  # 从CSV文件批量导入
  python main.py import-file --file shares.csv
  
  # 从Excel文件导入
  python main.py import-file --file shares.xlsx --sheet Sheet1
  
  # 爬取待处理资源
  python main.py crawl-pending --limit 50 --depth 3
  
  # 根据分享码爬取
  python main.py crawl-codes --codes ABC123 DEF456
  
  # 重试失败资源
  python main.py retry-failed --limit 20
  
  # 查看统计信息
  python main.py stats
  
  # 启动守护进程
  python main.py daemon --interval 30 --batch-size 20
        """

    parser = argparse.ArgumentParser(
        description="云盘资源爬取系统",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=usage_examples,
    )

    # Global options shared by every command.
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
                        default='INFO', help='日志级别')
    parser.add_argument('--log-file', help='日志文件路径')

    subparsers = parser.add_subparsers(dest='command', help='可用命令')

    # Same choices list is used by several subcommands; build it once.
    cloud_choices = [ct.value for ct in CloudType]

    # import-single: one share code with optional metadata.
    single_cmd = subparsers.add_parser('import-single', help='导入单个分享码')
    single_cmd.add_argument('--type', required=True, choices=cloud_choices,
                            help='云盘类型')
    single_cmd.add_argument('--code', required=True, help='分享码')
    single_cmd.add_argument('--access-code', default='', help='访问码')
    single_cmd.add_argument('--name', default='', help='分享名称')
    single_cmd.add_argument('--url', default='', help='分享链接')
    single_cmd.add_argument('--force', action='store_true', help='强制更新已存在的记录')

    # import-file: bulk import from CSV/JSON/Excel/TXT.
    file_cmd = subparsers.add_parser('import-file', help='从文件导入分享码')
    file_cmd.add_argument('--file', required=True, help='文件路径（支持CSV、JSON、Excel、TXT）')
    file_cmd.add_argument('--sheet', help='Excel工作表名称（仅Excel文件有效）')
    file_cmd.add_argument('--force', action='store_true', help='强制更新已存在的记录')

    # crawl-pending: process resources still waiting in the queue.
    # NOTE(review): this --depth default (20) differs from every other
    # depth default in this file (3) — confirm it is intentional.
    pending_cmd = subparsers.add_parser('crawl-pending', help='爬取待处理资源')
    pending_cmd.add_argument('--type', choices=cloud_choices,
                             help='云盘类型（不指定则爬取所有类型）')
    pending_cmd.add_argument('--limit', type=int, default=100, help='爬取数量限制')
    pending_cmd.add_argument('--depth', type=int, default=20, help='目录遍历深度')

    # crawl-codes: crawl an explicit list of share codes.
    codes_cmd = subparsers.add_parser('crawl-codes', help='根据分享码爬取')
    codes_cmd.add_argument('--codes', nargs='+', required=True, help='分享码列表')
    codes_cmd.add_argument('--type', default=CloudType.TIANYI.value,
                           choices=cloud_choices, help='云盘类型')
    codes_cmd.add_argument('--depth', type=int, default=3, help='目录遍历深度')

    # retry-failed: re-run previously failed resources.
    retry_cmd = subparsers.add_parser('retry-failed', help='重试失败的资源')
    retry_cmd.add_argument('--limit', type=int, default=50, help='重试数量限制')
    retry_cmd.add_argument('--depth', type=int, default=3, help='目录遍历深度')

    # stats: no options of its own.
    subparsers.add_parser('stats', help='查看统计信息')

    # daemon: long-running scheduled crawl.
    daemon_cmd = subparsers.add_parser('daemon', help='启动守护进程模式')
    daemon_cmd.add_argument('--interval', type=int, default=30, help='检查间隔（分钟）')
    daemon_cmd.add_argument('--batch-size', type=int, default=20, help='每批处理数量')

    return parser


def main():
    """CLI entry point: parse arguments and run the requested command."""
    parser = create_parser()
    args = parser.parse_args()

    # Logging must be configured before any component starts emitting.
    setup_logging(args.log_level, args.log_file)

    app = CloudCrawlerApp()

    # Map each subcommand name to a zero-argument thunk; lookups that
    # miss fall through to printing the help text.
    dispatch = {
        'import-single': lambda: app.import_single_share(
            clouds_type=args.type,
            share_code=args.code,
            access_code=args.access_code,
            share_name=args.name,
            full_url=args.url,
            force_update=args.force,
        ),
        'import-file': lambda: app.import_from_file(
            args.file, args.force, getattr(args, 'sheet', None)
        ),
        'crawl-pending': lambda: app.crawl_pending(args.type, args.limit, args.depth),
        'crawl-codes': lambda: app.crawl_by_codes(args.codes, args.type, args.depth),
        'retry-failed': lambda: app.retry_failed(args.limit, args.depth),
        'stats': lambda: app.show_statistics(),
        'daemon': lambda: app.start_daemon(args.interval, args.batch_size),
    }

    try:
        if not app.initialize():
            print("应用初始化失败")
            sys.exit(1)

        action = dispatch.get(args.command)
        if action is not None:
            action()
        else:
            parser.print_help()

    except KeyboardInterrupt:
        print("\n操作被用户中断")
    except Exception as e:
        logging.error(f"程序执行出错: {str(e)}")
        sys.exit(1)
    finally:
        # Always release scheduler/crawler/DB resources on the way out.
        app.shutdown()


# Script entry point: run the CLI only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()