"""
任务控制API
提供手动触发爬虫任务的接口
"""
from fastapi import APIRouter, HTTPException, BackgroundTasks, Depends
from pydantic import BaseModel, Field
from typing import Optional, Dict, Any, List
from datetime import datetime
import uuid
import sys
import os

# Add the task-queue source directory to sys.path so its Celery app can be imported
task_queue_path = os.getenv('TASK_QUEUE_PATH',
                            os.path.abspath(os.path.join(
                                os.path.dirname(__file__),
                                '../../../../task-queue/src'
                            )))
sys.path.insert(0, task_queue_path)

from celery.result import AsyncResult
from src.celery_app import app as celery_app
import redis
import motor.motor_asyncio

# Import the authentication module
from .auth import AuthUser, require_crawler_trigger, require_crawler_status, get_current_user

router = APIRouter(prefix="/api/v1/crawlers", tags=["crawlers"])

# Redis客户端
redis_client = redis.Redis(
    host=os.getenv('REDIS_HOST', 'localhost'),
    port=int(os.getenv('REDIS_PORT', 6379)),
    db=4,
    decode_responses=True
)

# MongoDB客户端
mongo_client = motor.motor_asyncio.AsyncIOMotorClient(
    os.getenv('MONGODB_URI', 'mongodb://localhost:27017')
)
db = mongo_client[os.getenv('MONGODB_DB', 'crawler_scheduler')]

class TriggerRequest(BaseModel):
    """触发请求模型"""
    force: bool = Field(False, description="是否强制爬取（忽略增量更新）")
    priority: int = Field(5, ge=0, le=10, description="任务优先级（0-10）")
    params: Dict[str, Any] = Field(default_factory=dict, description="额外参数")
    dedupe_window: int = Field(60, description="去重时间窗口（秒）")

class TriggerResponse(BaseModel):
    """触发响应模型"""
    task_id: str = Field(..., description="任务ID")
    platform: str = Field(..., description="平台名称")
    status: str = Field(..., description="任务状态")
    triggered_at: datetime = Field(..., description="触发时间")
    message: str = Field(..., description="响应消息")

# 支持的爬虫平台
SUPPORTED_PLATFORMS = [
    'weibo', 'zhihu', 'toutiao', 'baidu', 
    'xiaohongshu', 'douyin', 'twitter', 'reddit', 'youtube'
]

def check_duplicate_task(platform: str, window: int = 60) -> bool:
    """
    检查是否有重复任务
    
    Args:
        platform: 平台名称
        window: 时间窗口（秒）
        
    Returns:
        是否存在重复任务
    """
    key = f"trigger_lock:{platform}"
    
    # 使用Redis实现分布式锁
    if redis_client.set(key, "1", nx=True, ex=window):
        return False  # 没有重复
    return True  # 有重复任务

@router.post("/{crawler_name}/trigger", response_model=TriggerResponse)
async def trigger_crawler(
    crawler_name: str,
    request: TriggerRequest,
    background_tasks: BackgroundTasks,
    current_user: AuthUser = Depends(require_crawler_trigger)
):
    """
    手动触发爬虫任务（需要认证）
    
    Args:
        crawler_name: 爬虫名称（平台名）
        request: 触发请求
        current_user: 当前认证用户
        
    Returns:
        触发响应
    """
    # 验证爬虫名称
    if crawler_name not in SUPPORTED_PLATFORMS:
        raise HTTPException(
            status_code=400,
            detail=f"Unsupported crawler: {crawler_name}. "
                  f"Supported: {', '.join(SUPPORTED_PLATFORMS)}"
        )
    
    # 检查重复任务
    if check_duplicate_task(crawler_name, request.dedupe_window):
        raise HTTPException(
            status_code=429,
            detail=f"A task for {crawler_name} was triggered recently. "
                  f"Please wait {request.dedupe_window} seconds."
        )
    
    # 构建任务名称
    task_name = f"src.tasks.crawler.crawl_{crawler_name}"
    
    # 获取任务
    task = celery_app.tasks.get(task_name)
    if not task:
        raise HTTPException(
            status_code=500,
            detail=f"Task {task_name} not found"
        )
    
    # 生成任务ID
    task_id = f"{crawler_name}_{uuid.uuid4().hex[:8]}_{int(datetime.utcnow().timestamp())}"
    
    try:
        # 触发任务
        result = task.apply_async(
            kwargs={'force': request.force, **request.params},
            task_id=task_id,
            priority=request.priority,
            queue=f'crawler_queue'
        )
        
        # 记录触发历史
        background_tasks.add_task(
            save_trigger_history,
            task_id=task_id,
            platform=crawler_name,
            request_data=request.dict()
        )
        
        return TriggerResponse(
            task_id=task_id,
            platform=crawler_name,
            status="PENDING",
            triggered_at=datetime.utcnow(),
            message=f"Task {task_id} triggered successfully"
        )
        
    except Exception as e:
        # 释放锁
        redis_client.delete(f"trigger_lock:{crawler_name}")
        
        raise HTTPException(
            status_code=500,
            detail=f"Failed to trigger task: {str(e)}"
        )

async def save_trigger_history(task_id: str, platform: str, request_data: dict):
    """
    保存触发历史
    
    Args:
        task_id: 任务ID
        platform: 平台名称
        request_data: 请求数据
    """
    collection = db.trigger_history
    
    await collection.insert_one({
        'task_id': task_id,
        'platform': platform,
        'request': request_data,
        'triggered_at': datetime.utcnow(),
        'triggered_by': 'manual',
        'user': task_id.split('_')[0] if '_' in task_id else 'unknown'
    })

@router.get("/{crawler_name}/status")
async def get_crawler_status(
    crawler_name: str,
    current_user: AuthUser = Depends(require_crawler_status)
):
    """
    获取爬虫状态（需要认证）
    
    Args:
        crawler_name: 爬虫名称
        current_user: 当前认证用户
        
    Returns:
        爬虫状态信息
    """
    if crawler_name not in SUPPORTED_PLATFORMS:
        raise HTTPException(
            status_code=400,
            detail=f"Unsupported crawler: {crawler_name}"
        )
    
    # 获取最近的任务
    collection = db.task_results
    
    recent_tasks = await collection.find({
        'task_name': f'src.tasks.crawler.crawl_{crawler_name}'
    }).sort('timestamp', -1).limit(10).to_list(10)
    
    # 计算统计信息
    success_count = sum(1 for t in recent_tasks if t['status'] == 'success')
    failed_count = sum(1 for t in recent_tasks if t['status'] == 'failed')
    
    # 获取最后更新时间
    hot_items_collection = db[f'{crawler_name}_hot_items']
    last_item = await hot_items_collection.find_one(
        sort=[('crawled_at', -1)]
    )
    
    return {
        'crawler': crawler_name,
        'last_update': last_item['crawled_at'] if last_item else None,
        'recent_tasks': {
            'total': len(recent_tasks),
            'success': success_count,
            'failed': failed_count,
            'success_rate': success_count / len(recent_tasks) if recent_tasks else 0
        },
        'is_locked': redis_client.exists(f"trigger_lock:{crawler_name}")
    }

@router.get("/", response_model=List[str])
async def list_crawlers(
    current_user: AuthUser = Depends(get_current_user)
):
    """
    列出所有支持的爬虫（需要认证）
    
    Args:
        current_user: 当前认证用户
    
    Returns:
        爬虫列表
    """
    return SUPPORTED_PLATFORMS

@router.post("/batch/trigger")
async def trigger_batch(
    platforms: List[str],
    request: TriggerRequest,
    background_tasks: BackgroundTasks,
    current_user: AuthUser = Depends(require_crawler_trigger)
):
    """
    批量触发爬虫（需要认证）
    
    Args:
        platforms: 平台列表
        request: 触发请求
        current_user: 当前认证用户
        
    Returns:
        批量触发结果
    """
    results = []
    
    for platform in platforms:
        if platform not in SUPPORTED_PLATFORMS:
            results.append({
                'platform': platform,
                'success': False,
                'error': f"Unsupported platform: {platform}"
            })
            continue
            
        # 检查重复
        if check_duplicate_task(platform, request.dedupe_window):
            results.append({
                'platform': platform,
                'success': False,
                'error': "Task triggered recently"
            })
            continue
            
        # 触发任务
        task_name = f"src.tasks.crawler.crawl_{platform}"
        task = celery_app.tasks.get(task_name)
        
        if not task:
            results.append({
                'platform': platform,
                'success': False,
                'error': f"Task not found"
            })
            continue
            
        try:
            task_id = f"{platform}_{uuid.uuid4().hex[:8]}_{int(datetime.utcnow().timestamp())}"
            
            task.apply_async(
                kwargs={'force': request.force, **request.params},
                task_id=task_id,
                priority=request.priority,
                queue='crawler_queue'
            )
            
            background_tasks.add_task(
                save_trigger_history,
                task_id=task_id,
                platform=platform,
                request_data=request.dict()
            )
            
            results.append({
                'platform': platform,
                'success': True,
                'task_id': task_id
            })
            
        except Exception as e:
            redis_client.delete(f"trigger_lock:{platform}")
            results.append({
                'platform': platform,
                'success': False,
                'error': str(e)
            })
            
    return {
        'triggered': sum(1 for r in results if r['success']),
        'failed': sum(1 for r in results if not r['success']),
        'results': results
    }