import asyncio
import logging
import uuid
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException
from sqlalchemy import func
from sqlalchemy.orm import Session

from app import models, schemas
from app.database import get_db
from app.services.torrent_service import TorrentService

router = APIRouter()
logger = logging.getLogger(__name__)

# In-memory store of scan task states, keyed by uuid4 task id.
# Each value holds: status, progress, current_step, total_steps,
# completed_steps, result, error (see start_scan for the initial shape).
# NOTE(review): entries are never evicted, so this grows for the life of the
# process, and it is per-process only — confirm a single-worker deployment.
scan_tasks: Dict[str, Dict[str, Any]] = {}

@router.post("/start")
async def start_scan(background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """Kick off an asynchronous scan and return its task id immediately.

    Registers a fresh entry in the module-level ``scan_tasks`` store and
    schedules ``run_scan_task`` to run after the response has been sent.
    Clients poll ``/progress/{task_id}`` to follow the scan.
    """
    task_id = str(uuid.uuid4())

    # Seed the shared progress record before scheduling the worker so a
    # poll arriving right after this response finds the task.
    initial_state = {
        "status": "running",
        "progress": 0,
        "current_step": "初始化扫描...",
        "total_steps": 0,
        "completed_steps": 0,
        "result": None,
        "error": None,
    }
    scan_tasks[task_id] = initial_state

    logger.info(f"启动扫描任务，任务ID: {task_id}")

    # Defer the heavy work so this endpoint stays fast.
    background_tasks.add_task(run_scan_task, task_id, db)

    return {"task_id": task_id, "status": "started"}

# NOTE: progress updates are written to scan_tasks very frequently throughout
# this function so the /progress endpoint always has fresh data to report.
async def run_scan_task(task_id: str, db: Session):
    """Execute one full scan in the background, recording progress in scan_tasks.

    Phases (with the progress percentage each occupies):
      1. Load enabled client configs and scan paths        (0-15%)
      2. Collect seeding files from each download client   (15-45%)
      3. Scan local paths and diff against seeding files   (45-85%)
      4. Replace stored scan results in the database       (85-100%)

    On any unhandled error the task entry is marked "failed" with the error
    message; per-client and per-path errors are logged and skipped instead.

    NOTE(review): ``db`` is the request-scoped session handed over by
    start_scan; FastAPI's dependency teardown may close it before this
    background task finishes — confirm the session's lifetime.
    """
    try:
        logger.info(f"开始执行扫描任务: {task_id}")
        service = TorrentService(db)
        logger.info(f"数据库路径: {service.db_path}")
        
        # Progress update: fetching client configuration.
        scan_tasks[task_id].update({
            "current_step": "获取客户端配置...",
            "progress": 5
        })
        
        configs = await service.config_repo.get_enabled_configs(db)
        if not configs:
            raise ValueError("没有启用的下载器")
            
        scan_tasks[task_id].update({
            "total_steps": len(configs) + 2,
            "current_step": f"找到 {len(configs)} 个启用的客户端",
            "progress": 10
        })
        
        # Progress update: fetching scan path configuration.
        scan_tasks[task_id].update({
            "current_step": "获取扫描路径配置...",
            "progress": 12
        })
        
        scan_paths = await service.scan_path_repo.get_enabled_paths(db)
        if not scan_paths:
            raise ValueError("没有配置扫描路径")
            
        scan_tasks[task_id].update({
            "current_step": f"找到 {len(scan_paths)} 个扫描路径",
            "progress": 15
        })
        
        all_seeding_files = []
        
        # Process each client in turn; this phase spans progress 15% - 45%.
        for i, config in enumerate(configs):
            base_progress = 15 + (i * 30 // len(configs))
            
            scan_tasks[task_id].update({
                "current_step": f"连接到 {config.client_type}:{config.host}...",
                "progress": base_progress,
                "completed_steps": i
            })
            
            try:
                # Connection attempt with timeout detection below.
                scan_tasks[task_id].update({
                    "current_step": f"创建 {config.client_type} 客户端...",
                    "progress": base_progress + 2
                })
                
                client = service._create_client(config)
                if not client:
                    logger.warning(f"无法创建客户端: {config.client_type}:{config.host}")
                    continue
                
                scan_tasks[task_id].update({
                    "current_step": f"测试连接到 {config.client_type}:{config.host}...",
                    "progress": base_progress + 5
                })
                
                # Bound the connection test so one dead client cannot stall
                # the whole scan.
                connection_task = asyncio.create_task(client.test_connection())
                try:
                    connected = await asyncio.wait_for(connection_task, timeout=30.0)
                except asyncio.TimeoutError:
                    scan_tasks[task_id].update({
                        "current_step": f"连接 {config.client_type}:{config.host} 超时",
                        "progress": base_progress + 8
                    })
                    logger.warning(f"连接客户端超时: {config.client_type}:{config.host}")
                    continue
                
                if not connected:
                    scan_tasks[task_id].update({
                        "current_step": f"无法连接到 {config.client_type}:{config.host}",
                        "progress": base_progress + 8
                    })
                    logger.warning(f"无法连接到客户端: {config.client_type}:{config.host}")
                    continue
                    
                # Fetch this client's list of seeding files.
                scan_tasks[task_id].update({
                    "current_step": f"从 {config.client_type}:{config.host} 获取做种文件...",
                    "progress": base_progress + 10
                })
                
                # Fetching the file list gets a longer (60s) timeout than the
                # connection test.
                seeding_task = asyncio.create_task(client.get_seeding_files())
                try:
                    seeding_files = await asyncio.wait_for(seeding_task, timeout=60.0)
                except asyncio.TimeoutError:
                    scan_tasks[task_id].update({
                        "current_step": f"从 {config.client_type}:{config.host} 获取文件超时",
                        "progress": base_progress + 15
                    })
                    logger.warning(f"获取做种文件超时: {config.client_type}:{config.host}")
                    continue
                
                all_seeding_files.extend(seeding_files)
                
                scan_tasks[task_id].update({
                    "current_step": f"从 {config.client_type}:{config.host} 获取到 {len(seeding_files)} 个做种文件",
                    "progress": 15 + ((i + 1) * 30 // len(configs))
                })
                
            except Exception as client_error:
                # A single failing client is skipped; the scan continues with
                # whatever seeding files were collected so far.
                logger.error(f"处理客户端 {config.client_type}:{config.host} 时出错: {str(client_error)}")
                scan_tasks[task_id].update({
                    "current_step": f"处理 {config.client_type}:{config.host} 时出错: {str(client_error)}",
                    "progress": base_progress + 15
                })
                continue
        
        # Scan local files; this phase spans progress 45% - 85%.
        scan_tasks[task_id].update({
            "current_step": "开始扫描本地文件...",
            "progress": 45
        })
        
        all_not_seeding_files = []
        total_scan_paths = len(scan_paths)
        
        for path_index, scan_path in enumerate(scan_paths):
            path = scan_path.get_path()
            base_progress = 45 + (path_index * 30 // total_scan_paths)
            
            try:
                # Progress callback for the directory walk (first 15% of this
                # path's progress window).
                def scan_progress_callback(processed: int, total: int, message: str):
                    progress_offset = (processed / total * 15) if total > 0 else 0
                    scan_tasks[task_id].update({
                        "current_step": f"扫描路径 {path}: {message}",
                        "progress": int(base_progress + progress_offset)
                    })
                
                local_files = await service.file_scanner.scan_directory(path, scan_progress_callback)
                
                # Progress callback for the seeding/local diff (second 15% of
                # this path's progress window).
                def compare_progress_callback(processed: int, total: int, message: str):
                    progress_offset = 15 + (processed / total * 15) if total > 0 else 15
                    scan_tasks[task_id].update({
                        "current_step": f"比较路径 {path}: {message}",
                        "progress": int(base_progress + progress_offset)
                    })
                
                seeding_paths = [file["path"] for file in all_seeding_files]
                not_seeding = await service.file_scanner.compare_files(local_files, seeding_paths, compare_progress_callback)
                all_not_seeding_files.extend(not_seeding)
                
            except Exception as scan_error:
                # A single failing path is skipped; remaining paths still scan.
                logger.error(f"扫描路径 {path} 时出错: {str(scan_error)}")
                scan_tasks[task_id].update({
                    "current_step": f"扫描路径 {path} 时出错: {str(scan_error)}",
                    "progress": base_progress + 15
                })
                continue
        
        # Persist results; this phase spans progress 85% - 100%.
        scan_tasks[task_id].update({
            "current_step": "保存扫描结果到数据库...",
            "progress": 85
        })
        
        try:
            # Build schema objects for every local file not found seeding.
            scan_results = [
                schemas.ScanResultCreate(
                    file_path=file["path"],
                    file_name=file["name"],
                    file_size=file["size"],
                    status="not_seeding"
                )
                for file in all_not_seeding_files
            ]
            
            scan_tasks[task_id].update({
                "current_step": "清理旧的扫描结果...",
                "progress": 90
            })
            
            # Each scan fully replaces the previous results: delete old rows...
            await service.scan_repo.delete_all_results(db)
            
            scan_tasks[task_id].update({
                "current_step": "创建新的扫描结果...",
                "progress": 95
            })
            
            # ...then insert the fresh ones.
            results = await service.scan_repo.create_scan_results(db, scan_results)
            
            # Mark the task completed with a summary for the UI.
            scan_tasks[task_id].update({
                "status": "completed",
                "current_step": f"扫描完成，找到 {len(all_not_seeding_files)} 个未做种文件",
                "progress": 100,
                "result": {
                    "total_seeding_files": len(all_seeding_files),
                    "total_not_seeding_files": len(all_not_seeding_files),
                    "scan_paths_count": len(scan_paths),
                    "clients_count": len(configs)
                }
            })
            
            logger.info(f"扫描任务 {task_id} 完成成功")
            
        except Exception as save_error:
            logger.error(f"保存扫描结果时出错: {str(save_error)}")
            raise save_error
        
    except Exception as e:
        # Any unhandled failure flags the task as failed; the error text is
        # surfaced to pollers of /progress/{task_id}.
        logger.error(f"扫描任务 {task_id} 失败: {str(e)}")
        scan_tasks[task_id].update({
            "status": "failed",
            "error": str(e),
            "progress": 0,
            "current_step": f"扫描失败: {str(e)}"
        })

@router.get("/progress/{task_id}")
async def get_scan_progress(task_id: str):
    """Return the progress record for a single scan task.

    Raises:
        HTTPException: 404 when the task id is unknown.
    """
    # Log only the requested id, lazily, at DEBUG: the old code dumped the
    # whole scan_tasks store at INFO on every poll, which floods the log on a
    # hot polling endpoint and exposes unrelated tasks' state.
    logger.debug("查询扫描任务进度: %s", task_id)
    if task_id not in scan_tasks:
        raise HTTPException(status_code=404, detail="任务不存在")

    return scan_tasks[task_id]

@router.get("/status")
async def get_scan_status():
    """Return the in-memory progress records of every known scan task."""
    return dict(tasks=scan_tasks)

@router.get("/health")
async def health_check():
    """Health-check endpoint reporting liveness and the current UTC time."""
    # Use a timezone-aware UTC timestamp: naive datetime.now() serializes
    # without an offset, which is ambiguous for remote monitoring clients.
    return {"status": "healthy", "timestamp": datetime.now(timezone.utc).isoformat()}

@router.get("/results", response_model=List[schemas.ScanResult])
async def get_scan_results(
    status: Optional[str] = None,
    skip: int = 0,
    limit: int = 100,
    db: Session = Depends(get_db)
):
    """Return paginated scan results, optionally filtered by status.

    Args:
        status: Optional status filter (e.g. "not_seeding"); None returns all.
        skip: Pagination offset (rows to skip).
        limit: Maximum number of rows to return.
        db: Request-scoped database session.

    Raises:
        HTTPException: 500 when the underlying service call fails.
    """
    try:
        service = TorrentService(db)
        results = await service.get_scan_results(status=status, skip=skip, limit=limit)
        logger.info(f"返回结果数量: {len(results)}")
        return results
    except Exception as e:
        # logger.exception records the traceback; `from e` preserves the
        # cause chain on the HTTPException for upstream error handlers.
        logger.exception(f"获取扫描结果失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取扫描结果失败: {str(e)}") from e

@router.get("/results/count")
async def get_scan_results_count(
    status: Optional[str] = None,
    db: Session = Depends(get_db)
):
    """Return the total number of stored scan results.

    Args:
        status: Optional status filter; None counts every row.
        db: Request-scoped database session.

    Raises:
        HTTPException: 500 when the count query fails.
    """
    try:
        # COUNT(*) directly in SQL — no row limit, no result materialization.
        query = db.query(func.count(models.ScanResult.id))
        if status:
            query = query.filter(models.ScanResult.status == status)
        total = query.scalar()
        return {"total": total}
    except Exception as e:
        # logger.exception records the traceback; `from e` preserves the
        # cause chain on the HTTPException for upstream error handlers.
        logger.exception(f"获取扫描结果总数失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取扫描结果总数失败: {str(e)}") from e
