import asyncio
import logging
from typing import Dict, Any, Optional
from celery import current_task
from sqlalchemy.orm import Session
from datetime import datetime, timedelta

from app.celery_app import celery_app
from app.db.session import SessionLocal
from app.crud.crud_generation_job import update_job
from app.models.generation_job import GenerationJob, JobStatus
from app.services.ai_manager import ai_service_manager
from app.services.storage_manager import storage_manager
from app.services.ai_base import AIServiceProvider, AIServiceType, GenerationRequest
from app.services.storage_base import FileType
from app.core.config import settings

logger = logging.getLogger(__name__)

class TaskProgressTracker:
    """Tracks a Celery task's progress and publishes it via update_state.

    Holds the latest clamped progress percentage and status message, pushes
    them to the Celery backend (state "PROGRESS") when a task is attached,
    and mirrors every update to the module logger.
    """

    def __init__(self, task, job_id: int):
        """
        Args:
            task: Bound Celery task instance (or None to skip state updates).
            job_id: Database id of the generation job being tracked.
        """
        self.task = task
        self.job_id = job_id
        self.progress = 0
        self.status_message = "任务初始化"

    def update_progress(self, progress: int, message: str):
        """Record progress (clamped to 0-100) and broadcast it.

        Args:
            progress: Raw percentage; values outside [0, 100] are clamped.
            message: Human-readable status message.
        """
        self.progress = min(100, max(0, progress))
        self.status_message = message

        if self.task:
            self.task.update_state(
                state="PROGRESS",
                meta={
                    "current": self.progress,
                    "total": 100,
                    "status": message,
                    "job_id": self.job_id
                }
            )

        # BUG FIX: log the clamped value (self.progress) so the log always
        # matches what was published to the Celery backend; the original
        # logged the raw, possibly out-of-range argument.
        logger.info(f"Job {self.job_id}: {self.progress}% - {message}")

@celery_app.task(bind=True, max_retries=3)
def enhanced_generate_video_task(self, job_id: int, provider: str = None) -> Dict[str, Any]:
    """Enhanced video generation task.

    Loads the job, selects an AI provider, runs the async video generation,
    optionally uploads returned binary content to storage, and records the
    outcome on the job row. Retries with exponential backoff on failure.

    Args:
        job_id: Primary key of the GenerationJob to process.
        provider: Optional preferred provider name; auto-selected when None.

    Returns:
        Summary dict: job_id, status, result_url, cost, provider, metadata.

    Raises:
        ValueError: Job missing or request validation failed.
        RuntimeError: Service unavailable or generation failed.
    """

    db: Session = SessionLocal()
    tracker = TaskProgressTracker(self, job_id)

    try:
        # Load the job row; fail fast if it does not exist.
        tracker.update_progress(5, "获取任务信息...")
        job = db.query(GenerationJob).filter(GenerationJob.id == job_id).first()
        if not job:
            raise ValueError(f"Job {job_id} not found")

        # Mark as processing before doing any expensive work.
        update_job(db, db_obj=job, obj_in={"status": JobStatus.PROCESSING})

        # Resolve an AI service able to generate video.
        tracker.update_progress(10, "初始化AI服务...")
        ai_provider = _select_ai_provider(provider, AIServiceType.VIDEO_GENERATION)
        ai_service = ai_service_manager.get_service(ai_provider)

        if not ai_service:
            raise RuntimeError(f"AI服务 {ai_provider.value} 不可用")

        # Build and validate the generation request.
        tracker.update_progress(15, "准备生成请求...")
        generation_request = GenerationRequest(
            prompt=job.prompt,
            service_type=AIServiceType.VIDEO_GENERATION,
            params=job.params or {},
            user_id=job.user_id,
            job_id=job_id
        )

        if not ai_service.validate_request(generation_request):
            raise ValueError("请求参数验证失败")

        # Log the estimated cost before spending anything.
        estimated_cost = ai_service.calculate_cost(generation_request)
        logger.info(f"Job {job_id}: Estimated cost: ${estimated_cost}")

        tracker.update_progress(25, f"正在使用 {ai_provider.value} 生成视频...")

        # BUG FIX: the original closed the event loop immediately after the
        # generation call and then reused the CLOSED loop for the storage
        # upload, raising "RuntimeError: Event loop is closed" whenever the
        # provider returned binary content. Keep one loop alive for all the
        # async work in this task and close it exactly once.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            ai_response = loop.run_until_complete(
                ai_service.generate_video(generation_request)
            )

            if not ai_response.success:
                raise RuntimeError(f"视频生成失败: {ai_response.error_message}")

            tracker.update_progress(70, "视频生成完成，准备上传...")

            result_url = ai_response.result_url
            final_cost = ai_response.cost

            # If the provider returned raw bytes, persist them to storage and
            # prefer the storage URL over the provider URL.
            if hasattr(ai_response, 'content') and ai_response.content:
                tracker.update_progress(80, "上传视频到存储...")
                storage_service = storage_manager.get_service()
                if storage_service:
                    storage_result = loop.run_until_complete(
                        storage_service.upload_file(
                            file_data=ai_response.content,
                            file_name=f"generated_video_{job_id}.mp4",
                            file_type=FileType.GENERATED_VIDEO,
                            content_type="video/mp4"
                        )
                    )
                    if storage_result.success:
                        result_url = storage_result.file_url
        finally:
            # Detach before closing so no closed loop is left as "current".
            asyncio.set_event_loop(None)
            loop.close()

        tracker.update_progress(90, "更新数据库...")

        # Persist the completed state and result.
        update_data = {
            "status": JobStatus.COMPLETED,
            "result_url": result_url,
            "completed_at": datetime.now(),
            "cost": final_cost,
            "metadata": ai_response.metadata
        }
        update_job(db, db_obj=job, obj_in=update_data)

        tracker.update_progress(100, "任务完成")

        return {
            "job_id": job_id,
            "status": "completed",
            "result_url": result_url,
            "cost": final_cost,
            "provider": ai_provider.value,
            "metadata": ai_response.metadata
        }

    except Exception as exc:
        logger.error(f"视频生成任务失败 (Job {job_id}): {exc}")

        # BUG FIX: 'job' is in locals() even when the query returned None
        # (job-not-found path), which would call update_job with db_obj=None.
        # Only mark the job failed when a real row was loaded.
        if 'job' in locals() and job is not None:
            error_data = {
                "status": JobStatus.FAILED,
                "error_message": str(exc),
                "failed_at": datetime.now()
            }
            update_job(db, db_obj=job, obj_in=error_data)

        tracker.update_progress(100, f"任务失败: {str(exc)}")

        # Exponential backoff retry: 1, 2, then 4 minutes.
        if self.request.retries < self.max_retries:
            countdown = 2 ** self.request.retries * 60
            logger.info(f"Job {job_id}: Retrying in {countdown} seconds...")
            raise self.retry(countdown=countdown, exc=exc)

        raise exc

    finally:
        db.close()

@celery_app.task(bind=True, max_retries=3)
def enhanced_generate_image_task(self, job_id: int, provider: str = None) -> Dict[str, Any]:
    """Enhanced image generation task.

    Loads the job, selects an AI provider, runs the async image generation,
    and records the outcome on the job row. Retries with exponential backoff
    on failure.

    Args:
        job_id: Primary key of the GenerationJob to process.
        provider: Optional preferred provider name; auto-selected when None.

    Returns:
        Summary dict: job_id, status, result_url, cost, provider.

    Raises:
        ValueError: Job missing.
        RuntimeError: Service unavailable or generation failed.
    """

    db: Session = SessionLocal()
    tracker = TaskProgressTracker(self, job_id)

    try:
        # Load the job row; fail fast if it does not exist.
        tracker.update_progress(5, "获取任务信息...")
        job = db.query(GenerationJob).filter(GenerationJob.id == job_id).first()
        if not job:
            raise ValueError(f"Job {job_id} not found")

        update_job(db, db_obj=job, obj_in={"status": JobStatus.PROCESSING})

        # Resolve an AI service able to generate images.
        tracker.update_progress(10, "初始化AI服务...")
        ai_provider = _select_ai_provider(provider, AIServiceType.IMAGE_GENERATION)
        ai_service = ai_service_manager.get_service(ai_provider)

        if not ai_service:
            raise RuntimeError(f"AI服务 {ai_provider.value} 不可用")

        # Build the generation request from the job's stored parameters.
        generation_request = GenerationRequest(
            prompt=job.prompt,
            service_type=AIServiceType.IMAGE_GENERATION,
            params=job.params or {},
            user_id=job.user_id,
            job_id=job_id
        )

        tracker.update_progress(25, f"正在使用 {ai_provider.value} 生成图片...")

        # Run the async generation on a dedicated loop; no further async
        # work follows, so closing it right away is safe here.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            ai_response = loop.run_until_complete(ai_service.generate_image(generation_request))
        finally:
            loop.close()

        if not ai_response.success:
            raise RuntimeError(f"图片生成失败: {ai_response.error_message}")

        tracker.update_progress(70, "图片生成完成...")

        result_url = ai_response.result_url
        final_cost = ai_response.cost

        # Persist the completed state and result (upload handling would
        # mirror the video task if binary content ever needs storing).
        tracker.update_progress(90, "更新数据库...")

        update_data = {
            "status": JobStatus.COMPLETED,
            "result_url": result_url,
            "completed_at": datetime.now(),
            "cost": final_cost,
            "metadata": ai_response.metadata
        }
        update_job(db, db_obj=job, obj_in=update_data)

        tracker.update_progress(100, "任务完成")

        return {
            "job_id": job_id,
            "status": "completed",
            "result_url": result_url,
            "cost": final_cost,
            "provider": ai_provider.value
        }

    except Exception as exc:
        logger.error(f"图像生成任务失败 (Job {job_id}): {exc}")

        # BUG FIX: 'job' is in locals() even when the query returned None
        # (job-not-found path), which would call update_job with db_obj=None.
        # Only mark the job failed when a real row was loaded.
        if 'job' in locals() and job is not None:
            update_job(db, db_obj=job, obj_in={
                "status": JobStatus.FAILED,
                "error_message": str(exc),
                "failed_at": datetime.now()
            })

        tracker.update_progress(100, f"任务失败: {str(exc)}")

        # Exponential backoff retry: 1, 2, then 4 minutes.
        if self.request.retries < self.max_retries:
            countdown = 2 ** self.request.retries * 60
            raise self.retry(countdown=countdown, exc=exc)

        raise exc

    finally:
        db.close()

@celery_app.task(bind=True)
def batch_generation_task(self, job_ids: list, provider: str = None) -> Dict[str, Any]:
    """Dispatch a batch of generation jobs to the per-type Celery tasks.

    For each job id, looks up the job to decide whether it is a video or
    image job, then enqueues the matching specialized task. Per-job failures
    are collected rather than aborting the batch.

    Args:
        job_ids: List of GenerationJob primary keys to dispatch.
        provider: Optional preferred provider forwarded to each sub-task.

    Returns:
        Batch summary: batch_id, counts, dispatched task ids, and failures.
    """

    results = []
    failed_jobs = []

    for job_id in job_ids:
        try:
            # Short-lived session just to classify the job.
            # BUG FIX: close the session in a finally block — the original
            # leaked it whenever the query raised, since the broad except
            # below keeps the loop running with the session still open.
            db = SessionLocal()
            try:
                job = db.query(GenerationJob).filter(GenerationJob.id == job_id).first()
            finally:
                db.close()

            if not job:
                failed_jobs.append({"job_id": job_id, "error": "Job not found"})
                continue

            # Route by the "type" hint stored in the job params.
            if "video" in (job.params or {}).get("type", "").lower():
                result = enhanced_generate_video_task.delay(job_id, provider)
            else:
                result = enhanced_generate_image_task.delay(job_id, provider)

            results.append({"job_id": job_id, "task_id": result.id})

        except Exception as e:
            failed_jobs.append({"job_id": job_id, "error": str(e)})

    return {
        "batch_id": self.request.id,
        "total_jobs": len(job_ids),
        "successful_jobs": len(results),
        "failed_jobs": len(failed_jobs),
        "results": results,
        "failures": failed_jobs
    }

def _select_ai_provider(preferred_provider: str, service_type: AIServiceType) -> AIServiceProvider:
    """Select an AI service provider for the given service type.

    Tries the caller's preferred provider first; if it is invalid,
    unavailable, or does not support the service type, falls back to a
    fixed priority list over the currently available services.

    Args:
        preferred_provider: Provider name requested by the caller (may be None/empty).
        service_type: The AIServiceType the provider must support.

    Returns:
        The chosen AIServiceProvider.

    Raises:
        RuntimeError: No service supports the requested type.
    """

    # Honor the explicitly requested provider when it actually works.
    if preferred_provider:
        try:
            provider = AIServiceProvider(preferred_provider)
        except ValueError:
            logger.warning(f"指定的提供商 {preferred_provider} 不支持或不可用")
        else:
            service = ai_service_manager.get_service(provider)
            if service and service_type in service.get_supported_services():
                return provider
            # BUG FIX: the original only warned on an invalid provider NAME
            # (ValueError) and fell through silently when a valid provider
            # was unavailable or lacked the service type; warn here too so
            # the fallback is always visible in the logs.
            logger.warning(f"指定的提供商 {preferred_provider} 不支持或不可用")

    # Otherwise pick automatically from whatever is available.
    available_services = ai_service_manager.get_services_by_type(service_type)

    if not available_services:
        raise RuntimeError(f"没有可用的 {service_type.value} 服务")

    # Priority policy: Jimeng AI > Google Veo 3 > Stability AI > OpenAI.
    priority_order = [
        AIServiceProvider.JIMENG_AI,
        AIServiceProvider.GOOGLE_VEO3,
        AIServiceProvider.STABILITY_AI,
        AIServiceProvider.OPENAI_DALLE,
    ]

    for provider in priority_order:
        if provider in available_services:
            return provider

    # No prioritized provider available: take any available one.
    return next(iter(available_services))

# 任务监控和清理
@celery_app.task
def cleanup_failed_jobs():
    """Scan for failed jobs older than 24 hours and log them for cleanup.

    Returns:
        Dict with the number of jobs processed: {"cleaned_jobs": count}.
    """

    db = SessionLocal()
    try:
        # Anything that failed before this cutoff is eligible for cleanup.
        cutoff_time = datetime.now() - timedelta(hours=24)
        stale_jobs = (
            db.query(GenerationJob)
            .filter(
                GenerationJob.status == JobStatus.FAILED,
                GenerationJob.failed_at < cutoff_time,
            )
            .all()
        )

        # Actual resource cleanup (e.g. temp files) could hook in here;
        # for now each candidate is just logged.
        for job in stale_jobs:
            logger.info(f"清理失败任务 {job.id}")

        return {"cleaned_jobs": len(stale_jobs)}

    finally:
        db.close()

@celery_app.task
def task_health_check():
    """Detect jobs stuck in PROCESSING for over two hours and fail them.

    Returns:
        Dict with overall status and how many stuck jobs were reset.
    """

    db = SessionLocal()
    try:
        # Any job still processing two hours after creation is suspect.
        stale_threshold = datetime.now() - timedelta(hours=2)
        stuck_jobs = (
            db.query(GenerationJob)
            .filter(
                GenerationJob.status == JobStatus.PROCESSING,
                GenerationJob.created_at < stale_threshold,
            )
            .all()
        )

        if stuck_jobs:
            logger.warning(f"发现 {len(stuck_jobs)} 个可能卡住的任务")

            # Force stuck jobs into FAILED so they can be retried/cleaned.
            for job in stuck_jobs:
                update_job(db, db_obj=job, obj_in={
                    "status": JobStatus.FAILED,
                    "error_message": "任务超时，自动重置",
                    "failed_at": datetime.now()
                })

        return {
            "status": "healthy",
            "stuck_jobs_reset": len(stuck_jobs)
        }

    finally:
        db.close()