"""
分片任务管理API
提供类似xxl-job-admin的分片集群管理功能
"""
from datetime import datetime
from typing import Any, Dict, List, Optional
from uuid import UUID

from fastapi import APIRouter, Depends, HTTPException, Query, status
from pydantic import BaseModel
from sqlalchemy import text
from sqlalchemy.orm import Session

from app.core.auth import get_current_user
from app.database.base import get_db
from app.models.user import User
from app.services.scheduled_job import ScheduledJobService
from app.services.shard_task import ShardingStrategy, shard_task_service


# Pydantic模型
class ShardStatusResponse(BaseModel):
    """Response model describing the sharded-execution state of one job."""
    job_id: str  # job UUID rendered as a string
    total_shards: int  # number of shards the job is split into
    active_nodes: int  # count of cluster nodes currently working on shards
    node_list: List[str]  # identifiers of those active nodes
    shards: List[Dict[str, Any]]  # per-shard detail records; shape comes from shard_task_service.get_shard_status — TODO confirm
    status: str  # aggregate status of the sharded run


class ShardOptimizationRequest(BaseModel):
    """Request payload for the shard-count optimization endpoint."""
    data_size: int  # total number of items to process
    available_nodes: Optional[int] = 3  # NOTE(review): Optional admits an explicit null; downstream arithmetic assumes int — verify
    processing_time_per_item: Optional[float] = 0.1  # estimated seconds per item; null likewise unguarded downstream


class ShardOptimizationResponse(BaseModel):
    """Response model carrying the shard-count recommendation and estimates."""
    recommended_shards: int  # suggested number of shards for the workload
    estimated_time_per_shard: float  # seconds a single shard is expected to take
    estimated_total_time: float  # wall-clock estimate; shards run in parallel, so equals the slowest shard
    utilization_rate: float  # fraction of available nodes that would be busy, capped at 1.0


class ShardExecutionRequest(BaseModel):
    """Request payload for manually triggering a sharded execution."""
    handler_name: str  # name of a registered shard handler to invoke
    shard_count: int = 5  # how many shards to split the work into
    shard_strategy: str = ShardingStrategy.HASH  # one of the ShardingStrategy constants
    parameters: Dict[str, Any] = {}  # extra handler parameters; pydantic deep-copies mutable defaults, so sharing is safe


# Router exposing the xxl-job-admin-style shard cluster management endpoints.
router = APIRouter(prefix="/shard-tasks", tags=["分片任务管理"])
# Shared service instance used by the endpoints below for job lookup and logging.
job_service = ScheduledJobService()


@router.get("/{job_id}/status", response_model=ShardStatusResponse)
async def get_shard_status(
    job_id: UUID,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Return the current sharded-execution state for a scheduled job.

    Raises:
        HTTPException 404: the job id does not exist.
    """
    # Guard clause: unknown job ids get a 404 rather than an empty report.
    if job_service.get_job(db, job_id) is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="任务不存在"
        )

    # The service returns a dict matching ShardStatusResponse's fields.
    return ShardStatusResponse(**shard_task_service.get_shard_status(job_id))


@router.post("/{job_id}/trigger-shard")
async def trigger_shard_execution(
    job_id: UUID,
    request: ShardExecutionRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Manually trigger a sharded execution of a scheduled job.

    Writes an execution log entry, launches the shard task in the background
    (without awaiting it), and returns immediately with the log id.

    Raises:
        HTTPException 404: the job id does not exist.
        HTTPException 500: log creation or task launch failed.
    """
    # Verify the job exists before doing any work.
    job = job_service.get_job(db, job_id)
    if not job:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="任务不存在"
        )

    try:
        # Merge caller-supplied parameters with the sharding directives.
        shard_params = {
            **request.parameters,
            'enable_sharding': True,
            'shard_count': request.shard_count,
            'shard_strategy': request.shard_strategy,
            'max_shards': request.shard_count
        }

        # Record an execution log so the run is traceable in the admin UI.
        from app.schemas.scheduled_job import JobLogCreate, TriggerType
        log_data = JobLogCreate(
            job_id=job_id,
            trigger_type=TriggerType.MANUAL,
            executor_handler=request.handler_name,
            executor_params=shard_params,
            trigger_time=datetime.now()
        )
        log = job_service.create_job_log(db, log_data)

        # Launch the shard task without awaiting it.
        import asyncio
        task = asyncio.create_task(shard_task_service.execute_shard_task(
            job_id=job_id,
            handler_name=request.handler_name,
            params=shard_params,
            log_id=log.id,  # type: ignore
            max_shards=request.shard_count
        ))

        # The event loop only keeps a weak reference to running tasks, so a
        # fire-and-forget task with no strong reference may be garbage
        # collected mid-flight. Park the task in a function-lifetime set and
        # drop it again when it completes.
        pending = getattr(trigger_shard_execution, "_pending_tasks", None)
        if pending is None:
            pending = set()
            trigger_shard_execution._pending_tasks = pending  # type: ignore[attr-defined]
        pending.add(task)

        def _on_done(finished):
            pending.discard(finished)
            if not finished.cancelled():
                # Retrieve the exception (if any) so asyncio does not log
                # "Task exception was never retrieved"; the shard service
                # handles its own error reporting/logging.
                finished.exception()

        task.add_done_callback(_on_done)

        return {
            "message": "分片任务已触发",
            "job_id": str(job_id),
            "log_id": str(log.id),  # type: ignore
            "shard_count": request.shard_count,
            "shard_strategy": request.shard_strategy
        }

    except Exception as e:
        # Preserve the original cause for server-side tracebacks.
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"触发分片任务失败: {str(e)}"
        ) from e


@router.post("/optimize", response_model=ShardOptimizationResponse)
async def optimize_sharding(
    request: ShardOptimizationRequest,
    current_user: User = Depends(get_current_user)
):
    """Recommend a shard count and estimate execution time for a workload.

    Raises:
        HTTPException 500: the optimization calculation failed.
    """
    # The request model declares these Optional, so an explicit JSON null
    # reaches us as None; fall back to the documented defaults instead of
    # crashing the arithmetic below with a TypeError.
    available_nodes = 3 if request.available_nodes is None else request.available_nodes
    per_item = 0.1 if request.processing_time_per_item is None else request.processing_time_per_item

    try:
        # Ask the service for the optimal shard count.
        optimal_shards = shard_task_service.estimate_optimal_shards(
            data_size=request.data_size,
            available_nodes=available_nodes,
            processing_time_per_item=per_item
        )
        # Guard against a degenerate zero-shard recommendation, which would
        # make the per-shard division below raise ZeroDivisionError.
        optimal_shards = max(1, optimal_shards)

        # Estimate per-shard time from an even split of the data.
        items_per_shard = request.data_size / optimal_shards
        estimated_time_per_shard = items_per_shard * per_item

        # Shards run in parallel, so total time equals the slowest shard.
        estimated_total_time = estimated_time_per_shard

        # Resource utilization: shards spread over the available nodes.
        utilization_rate = min(optimal_shards / available_nodes, 1.0)

        return ShardOptimizationResponse(
            recommended_shards=optimal_shards,
            estimated_time_per_shard=estimated_time_per_shard,
            estimated_total_time=estimated_total_time,
            utilization_rate=utilization_rate
        )

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"分片优化计算失败: {str(e)}"
        ) from e


@router.get("/strategies")
async def get_sharding_strategies(
    current_user: User = Depends(get_current_user)
):
    """List every supported sharding strategy with a description and use case."""
    # Static catalog: (strategy constant, description, typical use case).
    catalog = (
        (ShardingStrategy.HASH,
         "哈希分片：通过取模方式分配数据，适合均匀分布的场景",
         "用户ID、订单ID等数值型数据的处理"),
        (ShardingStrategy.RANGE,
         "范围分片：将数据按范围均匀分配，适合有序数据",
         "时间序列数据、数据库主键范围处理"),
        (ShardingStrategy.ROUND_ROBIN,
         "轮询分片：按轮询方式分配数据，确保负载均衡",
         "任意类型数据的均匀分配处理"),
        (ShardingStrategy.CUSTOM,
         "自定义分片：由业务逻辑决定分片规则",
         "复杂业务场景的定制化分片"),
    )
    return {
        "strategies": [
            {"name": name, "description": description, "use_case": use_case}
            for name, description, use_case in catalog
        ]
    }


@router.get("/handlers")
async def get_shard_handlers(
    current_user: User = Depends(get_current_user)
):
    """Describe the demo shard handlers and list everything actually registered."""
    # Names of handlers currently registered with the shard task service.
    registered = list(shard_task_service.shard_handlers.keys())

    # Static documentation for the bundled demo handlers.
    demo_handlers = [
        {
            "name": "shard_data_processing",
            "description": "分片数据处理演示处理器",
            "parameters": {
                "total_items": "总数据量",
                "shard_strategy": "分片策略"
            }
        },
        {
            "name": "shard_hash_processing",
            "description": "哈希分片处理演示处理器",
            "parameters": {
                "total_items": "总数据量",
                "hash_field": "哈希字段名"
            }
        },
        {
            "name": "shard_device_sync",
            "description": "设备数据同步分片处理器",
            "parameters": {
                "device_list": "设备ID列表",
                "sync_type": "同步类型"
            }
        }
    ]

    return {
        "handlers": demo_handlers,
        "total_handlers": len(registered),
        "registered_handlers": registered
    }


@router.get("/cluster/nodes")
async def get_cluster_nodes(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Summarize cluster nodes that currently hold live shard locks.

    Aggregates the distributed_locks table per node (the SQL uses
    PostgreSQL-specific ARRAY_AGG/SPLIT_PART).

    Raises:
        HTTPException 500: the aggregation query failed.
    """
    try:
        # Raw SQL must be wrapped in text(): SQLAlchemy 1.4 deprecates and
        # 2.0 rejects plain strings passed to Session.execute().
        result = db.execute(text("""
            SELECT 
                node_id,
                COUNT(*) as active_shards,
                MIN(created_at) as first_shard_time,
                MAX(expire_time) as last_expire_time,
                ARRAY_AGG(DISTINCT SPLIT_PART(lock_key, ':', 2)) as job_ids
            FROM distributed_locks 
            WHERE lock_key LIKE 'shard:%'
              AND expire_time >= NOW()
            GROUP BY node_id
            ORDER BY active_shards DESC
        """))

        nodes = []
        for row in result.fetchall():
            nodes.append({
                "node_id": row[0],
                "active_shards": row[1],
                # Timestamps may be NULL when the lock rows lack them.
                "first_shard_time": row[2].isoformat() if row[2] else None,
                "last_expire_time": row[3].isoformat() if row[3] else None,
                "processing_jobs": row[4] or [],
                "status": "active"
            })

        return {
            "total_nodes": len(nodes),
            "total_active_shards": sum(node["active_shards"] for node in nodes),
            "nodes": nodes,
            "timestamp": datetime.now().isoformat()
        }

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"获取集群节点信息失败: {str(e)}"
        ) from e


@router.delete("/cleanup/completed")
async def cleanup_completed_shards(
    job_id: Optional[UUID] = Query(None, description="特定任务ID，不指定则清理所有"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Delete expired shard locks, optionally scoped to one job.

    Raises:
        HTTPException 500: the delete failed (transaction is rolled back).
    """
    try:
        # Raw SQL must be wrapped in text(): SQLAlchemy 1.4 deprecates and
        # 2.0 rejects plain strings passed to Session.execute().
        if job_id:
            # Only this job's expired shard locks.
            result = db.execute(text("""
                DELETE FROM distributed_locks 
                WHERE lock_key LIKE :pattern
                  AND expire_time < NOW()
            """), {"pattern": f"shard:{job_id}:%"})
        else:
            # All expired shard locks, regardless of job.
            result = db.execute(text("""
                DELETE FROM distributed_locks 
                WHERE lock_key LIKE 'shard:%'
                  AND expire_time < NOW()
            """))

        db.commit()

        return {
            "message": f"已清理 {result.rowcount} 个过期分片锁",
            "cleaned_count": result.rowcount,
            "job_id": str(job_id) if job_id else "all"
        }

    except Exception as e:
        # Undo any partial delete before reporting the failure.
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"清理分片锁失败: {str(e)}"
        ) from e
