import asyncio
import time
from typing import List, Dict, Any, Optional
from datetime import datetime
from sqlalchemy.orm import Session
from app.crud.ai_workflow import ai_workflow
from app.models.ai_workflow import TaskStatus
from app.services.ai.workflow_processor import WorkflowProcessor
from app.core.database import SessionLocal
import threading


class BatchQueueManager:
    """Batch-processing queue manager.

    Responsibilities:
    1. Bound the number of AI workflows running at once (not the batch count)
    2. Queue management and task scheduling
    3. Retry on failure and status monitoring
    4. Keep concurrency within the limits of services such as Coze
    5. Global queue: new and resumed tasks join the pending queue; a task is
       only promoted to running while fewer than the threshold are running
    """
    
    def __init__(self, max_concurrent_workflows: int = 5, batch_size: int = 10):
        self.max_concurrent_workflows = max_concurrent_workflows  # max concurrently running workflows
        self.batch_size = batch_size  # images per batch (used for progress display)
        self.running_workflows = {}  # running workflows {workflow_id: task_info}
        self.pending_queue = []  # global pending queue
        self.completed_workflows = []  # finished workflows
        self.failed_workflows = []  # failed workflows
        # self._lock serializes the legacy async progress updates.
        # NOTE(review): asyncio.Lock is created outside any running event loop;
        # this assumes all async methods of one instance run on a single loop — confirm.
        self._lock = asyncio.Lock()  # async lock
        # Condition variable + daemon thread driving the global dispatcher
        self._cv = threading.Condition()
        self._dispatcher_thread = threading.Thread(target=self._dispatcher_loop, daemon=True)
        self._dispatcher_thread.start()
        
    async def process_large_batch(self, 
                                workflow_id: int, 
                                image_urls: List[str], 
                                user_prompt: str, 
                                variation_count: int,
                                db: Session) -> Dict[str, Any]:
        """Process a large batch of images under one parent workflow.

        Args:
            workflow_id: ID of the parent workflow
            image_urls: list of image URLs to process
            user_prompt: user prompt text
            variation_count: number of variations to generate per image
            db: database session (shared across progress updates)

        Returns:
            The workflow's accumulated ``result_data`` dict.

        Raises:
            Exception: re-raised with a summary message after marking the
                workflow FAILED if any stage of batch processing throws.
        """
        total_images = len(image_urls)
        print(f"🚀 开始处理大批量任务: {total_images}张图片，每批{self.batch_size}张")
        
        # Mark the parent workflow WAITING (queued, processing not yet started)
        ai_workflow.update_workflow_status(
            db=db,
            workflow_id=workflow_id,
            status=TaskStatus.WAITING,
            current_step=f"等待处理 (0/{total_images})",
            progress=0
        )
        
        # Split the image URLs into fixed-size batches
        batches = self._split_into_batches(image_urls)
        total_batches = len(batches)
        
        print(f"📦 分为{total_batches}个批次，每批最多{self.batch_size}张图片")
        
        # Seed the result payload that progress updates will mutate
        result_data = {
            "batch_info": {
                "total_images": total_images,
                "total_batches": total_batches,
                "batch_size": self.batch_size,
                "variation_count": variation_count,
                "processed_images": 0,
                "completed_batches": 0
            },
            "processed_images": [],
            "batch_results": [],
            "optimized_prompt": None,
            "mj_image_urls": [],
            "split_images": [],
            "flux_variations": [],
            "product_info": [],
            "detail_pages": []
        }
        
        # Persist the initial result payload
        ai_workflow.update_workflow_result(
            db=db,
            workflow_id=workflow_id,
            result_data=result_data
        )
        
        try:
            # Process every batch (internally flattened and run concurrently)
            await self._process_all_batches(
                workflow_id, batches, user_prompt, variation_count, db
            )
            
            # Aggregate the final results
            final_result = await self._collect_final_results(workflow_id, db)
            
            # Mark the parent workflow COMPLETED
            ai_workflow.update_workflow_status(
                db=db,
                workflow_id=workflow_id,
                status=TaskStatus.COMPLETED,
                current_step="批量处理完成",
                progress=100
            )
            
            print(f"✅ 大批量任务处理完成: {workflow_id}")
            return final_result
            
        except Exception as e:
            error_msg = f"大批量处理失败: {str(e)}"
            print(f"❌ {error_msg}")
            
            ai_workflow.update_workflow_status(
                db=db,
                workflow_id=workflow_id,
                status=TaskStatus.FAILED,
                error_message=error_msg
            )
            raise Exception(error_msg)
    
    def _split_into_batches(self, image_urls: List[str]) -> List[List[str]]:
        """Split the URL list into consecutive chunks of at most batch_size."""
        batches = []
        for i in range(0, len(image_urls), self.batch_size):
            batch = image_urls[i:i + self.batch_size]
            batches.append(batch)
        return batches
    
    async def _process_all_batches(self, 
                                 workflow_id: int, 
                                 batches: List[List[str]], 
                                 user_prompt: str, 
                                 variation_count: int,
                                 db: Session):
        """Process all images with a worker-pool queue so that at most
        max_concurrent_workflows run at a time and only running tasks show
        a running status."""
        # Flatten all batches into a single list of image URLs;
        # batch boundaries only matter for progress display.
        all_image_urls = []
        for batch in batches:
            all_image_urls.extend(batch)
        
        total_images = len(all_image_urls)
        print(f"🔄 开始处理 {total_images} 张图片，最大并发工作流数: {self.max_concurrent_workflows}")
        print(f"📊 并发控制配置: 工作流并发={self.max_concurrent_workflows}, 批次大小={self.batch_size}")
        
        # Fill the task queue completely before any worker starts,
        # so an empty queue reliably means "all work handed out".
        task_queue = asyncio.Queue()
        for i, url in enumerate(all_image_urls):
            await task_queue.put((i, url))
        
        # Result slots indexed by image position, plus shared counters
        results = [None] * total_images
        successful_workflows = 0
        failed_workflows = 0
        completed_tasks = 0
        
        async def worker(worker_id):
            """Worker coroutine: pull tasks from the queue until it drains."""
            nonlocal successful_workflows, failed_workflows, completed_tasks
            
            while True:
                try:
                    # Fetch a task; the 1s timeout doubles as the
                    # "queue drained" exit signal (queue is pre-filled above).
                    image_index, image_url = await asyncio.wait_for(task_queue.get(), timeout=1.0)
                    print(f"🚀 Worker-{worker_id}: 开始处理第 {image_index + 1}/{total_images} 张图片: {image_url}")
                    
                    try:
                        result = await self._process_single_workflow(
                            image_index,
                            image_url,
                            workflow_id,
                            user_prompt,
                            variation_count,
                            db
                        )
                        results[image_index] = result
                        successful_workflows += 1
                        print(f"✅ Worker-{worker_id}: 第 {image_index + 1}/{total_images} 张图片处理完成")
                    except Exception as e:
                        # Failures are recorded in-place so indices stay aligned
                        error_msg = str(e)
                        results[image_index] = Exception(error_msg)
                        failed_workflows += 1
                        print(f"❌ Worker-{worker_id}: 第 {image_index + 1}/{total_images} 张图片处理失败: {error_msg}")
                    
                    completed_tasks += 1
                    print(f"📊 进度更新: {completed_tasks}/{total_images} 已完成 (成功: {successful_workflows}, 失败: {failed_workflows})")
                    
                    # Mark the queue item done (unblocks task_queue.join()).
                    # NOTE(review): if the generic except below fires after a
                    # successful get() but before this call, join() would hang;
                    # unlikely here since per-image errors are caught above.
                    task_queue.task_done()
                    
                except asyncio.TimeoutError:
                    # Queue drained — this worker is finished
                    print(f"🏁 Worker-{worker_id}: 队列为空，工作完成")
                    break
                except Exception as e:
                    print(f"❌ Worker-{worker_id}: 工作协程异常: {str(e)}")
                    break
        
        # Start one worker per allowed concurrent workflow
        print(f"🚦 启动 {self.max_concurrent_workflows} 个工作协程")
        workers = []
        for i in range(self.max_concurrent_workflows):
            worker_task = asyncio.create_task(worker(i + 1))
            workers.append(worker_task)
        
        # Block until every queued task has been marked done
        await task_queue.join()
        
        # Cancel workers still waiting on an empty queue
        for worker_task in workers:
            worker_task.cancel()
        
        # Reap the worker tasks (swallowing CancelledError)
        await asyncio.gather(*workers, return_exceptions=True)
        
        print(f"📊 工作流处理统计: 成功 {successful_workflows}/{total_images}，失败 {failed_workflows}/{total_images}")
    
    async def _process_single_workflow(self, 
                                     image_index: int, 
                                     image_url: str, 
                                     workflow_id: int, 
                                     user_prompt: str, 
                                     variation_count: int,
                                     db: Session) -> Dict[str, Any]:
        """Run the full pipeline for one image, with retry on transient errors.

        Returns a dict: {"image_url", "status": "success"|"failed", ...};
        failures are returned, not raised.
        """
        print(f"🔄 开始处理第 {image_index + 1} 张图片: {image_url}")
        
        # Flip the task to RUNNING now that processing actually starts
        await self._update_workflow_progress(workflow_id, image_index, "running", db)
        
        # Retry configuration
        max_retries = 3
        retry_delay = 2  # seconds (grows by 1.5x per retry)
        
        for attempt in range(max_retries + 1):
            try:
                # Fresh DB session per attempt, isolated from the caller's
                local_db = SessionLocal()
                
                try:
                    # Execute the complete pipeline for this image
                    async with WorkflowProcessor(local_db) as processor:
                        result = await processor.execute_full_pipeline(
                            workflow_id=workflow_id,
                            image_path=image_url,
                            user_prompt=user_prompt
                        )
                    
                    # Record completion in the shared progress data
                    await self._update_workflow_progress(workflow_id, image_index, "completed", db)
                    
                    print(f"✅ 第 {image_index + 1} 张图片处理完成")
                    return {"image_url": image_url, "status": "success", "result": result}
                    
                finally:
                    local_db.close()
                    
            except Exception as e:
                error_msg = str(e)
                is_last_attempt = attempt == max_retries
                
                # Substring match against known transient failures
                # (connection drops, rate limits, gateway errors)
                is_retryable_error = (
                    "Connector is closed" in error_msg or
                    "Connection pool is closed" in error_msg or
                    "Connection timeout" in error_msg or
                    "Server disconnected" in error_msg or
                    "排队上限" in error_msg or
                    "429" in error_msg or
                    "502" in error_msg or
                    "503" in error_msg or
                    "504" in error_msg
                )
                
                if is_retryable_error and not is_last_attempt:
                    print(f"⚠️ 第 {image_index + 1} 张图片处理遇到可重试错误 (尝试 {attempt + 1}/{max_retries + 1}): {error_msg}")
                    print(f"🔄 等待 {retry_delay} 秒后重试...")
                    await asyncio.sleep(retry_delay)
                    retry_delay *= 1.5  # exponential backoff
                    continue
                else:
                    # Non-retryable error, or retries exhausted
                    final_error_msg = f"第 {image_index + 1} 张图片处理失败: {error_msg}"
                    if is_retryable_error:
                        final_error_msg += f" (已重试 {max_retries} 次)"
                    
                    print(f"❌ {final_error_msg}")
                    
                    # Record the failure in the shared progress data
                    await self._update_workflow_progress(workflow_id, image_index, "failed", db)
                    
                    return {"image_url": image_url, "status": "failed", "error": error_msg}
        
        # Unreachable in practice: every loop path returns or continues
        return {"image_url": image_url, "status": "failed", "error": "未知错误"}
    
    async def _update_workflow_progress(self, 
                                      workflow_id: int, 
                                      image_index: int, 
                                      status: str, 
                                      db: Session):
        """Update the parent workflow's progress/status for one image.

        ``status`` is one of "running", "completed", "failed". The asyncio
        lock makes the read-modify-write of result_data atomic among workers
        on the same event loop.
        NOTE(review): the caller-provided ``db`` session is shared across
        concurrent workers; the lock serializes access here, but confirm the
        session is not used elsewhere concurrently.
        """
        async with self._lock:
            # Re-read the current workflow row
            workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
            if not workflow:
                return
            
            result_data = workflow.result_data or {}
            batch_info = result_data.get("batch_info", {})
            
            # Only completions advance the processed counter;
            # "failed" images are intentionally not counted here.
            if status == "completed":
                batch_info["processed_images"] = batch_info.get("processed_images", 0) + 1
            
            # Overall progress as a percentage of completed images
            total_images = batch_info.get("total_images", 1)
            processed_images = batch_info.get("processed_images", 0)
            progress = int((processed_images / total_images) * 100)
            
            # Derive the parent workflow status from per-image state
            if status == "running":
                # At least one image is actively processing -> RUNNING
                task_status = TaskStatus.RUNNING
                current_step = f"处理中 ({processed_images}/{total_images})"
            elif processed_images == 0:
                # Nothing finished yet -> still WAITING
                task_status = TaskStatus.WAITING
                current_step = f"等待处理 (0/{total_images})"
            elif processed_images < total_images:
                # Partially done -> RUNNING
                task_status = TaskStatus.RUNNING
                current_step = f"处理中 ({processed_images}/{total_images})"
            else:
                # Everything done -> COMPLETED
                task_status = TaskStatus.COMPLETED
                current_step = f"处理完成 ({processed_images}/{total_images})"
            
            # Persist the derived status
            ai_workflow.update_workflow_status(
                db=db,
                workflow_id=workflow_id,
                status=task_status,
                current_step=current_step,
                progress=progress
            )
            
            # Persist the updated result payload
            result_data["batch_info"] = batch_info
            ai_workflow.update_workflow_result(
                db=db,
                workflow_id=workflow_id,
                result_data=result_data
            )
    
    async def _collect_final_results(self, workflow_id: int, db: Session) -> Dict[str, Any]:
        """Load the workflow row, print summary stats, and return result_data.

        The computed ``final_stats`` dict is only printed for observability;
        the raw ``result_data`` is what callers receive.
        """
        workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
        if not workflow:
            raise Exception("工作流不存在")
        
        result_data = workflow.result_data or {}
        
        # Pull headline counters out of the batch bookkeeping
        batch_info = result_data.get("batch_info", {})
        total_images = batch_info.get("total_images", 0)
        completed_batches = batch_info.get("completed_batches", 0)
        total_batches = batch_info.get("total_batches", 0)
        
        # Count the per-category outputs
        flux_variations = result_data.get("flux_variations", [])
        product_info = result_data.get("product_info", [])
        detail_pages = result_data.get("detail_pages", [])
        
        final_stats = {
            "total_images": total_images,
            "total_batches": total_batches,
            "completed_batches": completed_batches,
            "success_rate": (completed_batches / total_batches * 100) if total_batches > 0 else 0,
            "flux_variations_count": len(flux_variations),
            "product_info_count": len(product_info),
            "detail_pages_count": len(detail_pages)
        }
        
        print(f"📊 最终统计: {final_stats}")
        
        return result_data
    
    # ===================== Global scheduling: pending/running queues =====================
    def enqueue_new_workflow(self, workflow_id: int, image_url: str, user_prompt: str, db: Session):
        """Enqueue a newly created task on the global pending queue and mark
        its workflow WAITING. Safe to call from any thread."""
        try:
            ai_workflow.update_workflow_status(
                db=db,
                workflow_id=workflow_id,
                status=TaskStatus.WAITING,
                current_step="等待处理",
                progress=0
            )
        except Exception:
            # Even if the status update fails, still enqueue so the
            # task is not lost / blocked.
            pass
        task = {
            "type": "process",
            "workflow_id": workflow_id,
            "image_url": image_url,
            "user_prompt": user_prompt,
        }
        with self._cv:
            self.pending_queue.append(task)
            self._cv.notify()
    
    def enqueue_resume(self, workflow_id: int, db: Session):
        """Enqueue a resume task on the global pending queue and mark its
        workflow WAITING. Safe to call from any thread."""
        try:
            ai_workflow.update_workflow_status(
                db=db,
                workflow_id=workflow_id,
                status=TaskStatus.WAITING,
                current_step="等待续传",
            )
        except Exception:
            pass
        task = {
            "type": "resume",
            "workflow_id": workflow_id,
        }
        with self._cv:
            self.pending_queue.append(task)
            self._cv.notify()
    
    def _dispatcher_loop(self):
        """Dispatcher thread: promote a pending task to running only while
        the number of running tasks is below the configured threshold.
        Runs forever as a daemon thread."""
        while True:
            with self._cv:
                # Sleep until there is both capacity and pending work;
                # update_config()'s notify_all re-evaluates the threshold.
                while len(self.running_workflows) >= self.max_concurrent_workflows or not self.pending_queue:
                    self._cv.wait()
                task = self.pending_queue.pop(0)
                wf_id = task["workflow_id"]
                # Record as running (for counting / observability only)
                self.running_workflows[wf_id] = {
                    "task_type": task["type"],
                    "start_time": time.time(),
                }
            # Hand the task off to a dedicated worker thread
            threading.Thread(target=self._run_task_wrapper, args=(task,), daemon=True).start()
    
    def _run_task_wrapper(self, task: Dict[str, Any]):
        """Worker-thread entry: run the task, mark FAILED on error, and
        always release the running slot so the dispatcher can refill it."""
        workflow_id = task["workflow_id"]
        try:
            if task["type"] == "process":
                self._process_new_workflow_sync(task)
            else:
                self._process_resume_sync(task)
        except Exception as e:
            # Any escaping exception marks the workflow FAILED
            _db = SessionLocal()
            try:
                ai_workflow.update_workflow_status(
                    db=_db,
                    workflow_id=workflow_id,
                    status=TaskStatus.FAILED,
                    error_message=f"任务处理失败: {str(e)}"
                )
            finally:
                _db.close()
        finally:
            # Release the running slot and wake the dispatcher
            with self._cv:
                if workflow_id in self.running_workflows:
                    del self.running_workflows[workflow_id]
                self._cv.notify_all()
    
    def _process_new_workflow_sync(self, task: Dict[str, Any]):
        """Run a new-workflow task synchronously (on a worker thread)."""
        workflow_id = task["workflow_id"]
        image_url = task["image_url"]
        user_prompt = task["user_prompt"]
        # Flip to RUNNING only when execution actually begins
        db = SessionLocal()
        try:
            ai_workflow.update_workflow_status(
                db=db,
                workflow_id=workflow_id,
                status=TaskStatus.RUNNING,
                current_step="开始处理",
                progress=0
            )
        finally:
            db.close()
        
        async def _coro():
            # Dedicated session for the async pipeline, closed on exit
            local_db = SessionLocal()
            try:
                async with WorkflowProcessor(local_db) as processor:
                    await processor.execute_full_pipeline(workflow_id, image_url, user_prompt)
            finally:
                local_db.close()
        
        # Each worker thread gets its own short-lived event loop
        asyncio.run(_coro())
    
    def _process_resume_sync(self, task: Dict[str, Any]):
        """Run a resume task synchronously (on a worker thread); marks the
        workflow COMPLETED on success or FAILED (and re-raises) on error."""
        workflow_id = task["workflow_id"]
        # Flip to RUNNING
        db = SessionLocal()
        try:
            ai_workflow.update_workflow_status(
                db=db,
                workflow_id=workflow_id,
                status=TaskStatus.RUNNING,
                current_step="resume_processing"
            )
        finally:
            db.close()
        
        async def _coro():
            # Dedicated session for the async resume, closed on exit
            local_db = SessionLocal()
            try:
                async with WorkflowProcessor(local_db) as processor:
                    await processor.resume_workflow(workflow_id)
            finally:
                local_db.close()
        
        try:
            asyncio.run(_coro())
            db2 = SessionLocal()
            try:
                ai_workflow.update_workflow_status(
                    db=db2,
                    workflow_id=workflow_id,
                    status=TaskStatus.COMPLETED,
                    progress=100
                )
            finally:
                db2.close()
        except Exception as e:
            db3 = SessionLocal()
            try:
                ai_workflow.update_workflow_status(
                    db=db3,
                    workflow_id=workflow_id,
                    status=TaskStatus.FAILED,
                    error_message=f"断点续传异常: {str(e)}"
                )
            finally:
                db3.close()
            raise

    # Dynamic configuration update
    def update_config(self, max_concurrent_workflows: Optional[int] = None, batch_size: Optional[int] = None):
        """Update concurrency/batch configuration (thread-safe).

        Non-positive or ``None`` values are ignored; the dispatcher is woken
        so it re-evaluates the threshold immediately.
        """
        with self._cv:
            if max_concurrent_workflows is not None and max_concurrent_workflows > 0:
                self.max_concurrent_workflows = max_concurrent_workflows
            if batch_size is not None and batch_size > 0:
                self.batch_size = batch_size
            # Wake the dispatcher so it schedules under the new limits
            self._cv.notify_all()


# Global queue-manager singleton and the lock guarding its creation.
# The lock matters: BatchQueueManager.__init__ spawns a dispatcher thread,
# so a creation race would leave duplicate dispatchers running forever.
_batch_queue_manager = None
_batch_queue_manager_lock = threading.Lock()

def get_batch_queue_manager(max_concurrent_workflows: int = 5, batch_size: int = 10) -> BatchQueueManager:
    """Return the process-wide BatchQueueManager instance (thread-safe).

    Args:
        max_concurrent_workflows: concurrency limit to apply
        batch_size: images per batch to apply

    Returns:
        The singleton BatchQueueManager.

    Note:
        When the instance already exists the passed values are applied via
        ``update_config`` — so calling this with the defaults resets any
        previously customized configuration back to 5/10.
    """
    global _batch_queue_manager
    with _batch_queue_manager_lock:
        if _batch_queue_manager is None:
            _batch_queue_manager = BatchQueueManager(
                max_concurrent_workflows=max_concurrent_workflows,
                batch_size=batch_size
            )
        else:
            # Instance already exists: refresh its configuration
            _batch_queue_manager.update_config(
                max_concurrent_workflows=max_concurrent_workflows,
                batch_size=batch_size
            )
    return _batch_queue_manager