from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks, Body
from sqlalchemy.orm import Session
from sqlalchemy import func
from app.api.deps import get_db, get_current_user
from app.models.user import User
from app.models.ai_workflow import AIWorkflow, TaskStatus, StepStatus
from app.schemas.ai_workflow import (
    AIWorkflowCreate, AIWorkflowResponse, BatchImageProcessRequest, BatchImageProcessResponse,
    BatchCompletionDetailsRequest, BatchCompletionDetailsResponse, WorkflowCompletionDetails
)
from app.crud.ai_workflow import ai_workflow
from app.crud.image_task import image_task
from app.services.tencent.cos_service import TencentCOSService
from app.services.image.processor import ImageProcessor
from app.services.ai.workflow_processor import WorkflowProcessor
from app.services.ai.rate_limiter import get_global_limiter, APIType
from app.services.ai.batch_queue_manager import get_batch_queue_manager
import json
import os
import uuid
import traceback
import asyncio
import threading
from datetime import datetime
from app.core.database import SessionLocal

router = APIRouter()

@router.get("/workflows", response_model=List[AIWorkflowResponse])
def get_user_workflows(
    *,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
    skip: int = 0,
    limit: int = 100,
    status: Optional[str] = None
):
    """List the current user's workflows, optionally filtered by status.

    A falsy ``status`` returns the plain paginated listing; otherwise the
    string is validated against the TaskStatus enum (400 on an unknown value)
    and used as a filter.
    """
    if not status:
        # No filter requested: plain paginated listing.
        return ai_workflow.get_user_workflows(
            db=db, user_id=current_user.id, skip=skip, limit=limit
        )

    # Validate the status string against the TaskStatus enum before querying.
    try:
        parsed_status = TaskStatus(status)
    except ValueError:
        raise HTTPException(status_code=400, detail="无效的状态值")

    return ai_workflow.get_user_workflows_by_status(
        db=db, user_id=current_user.id, status=parsed_status, skip=skip, limit=limit
    )


@router.delete("/{workflow_id}")
def cancel_workflow_task(
    workflow_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Cancel or delete a workflow task.

    Active tasks (RUNNING/PENDING/WAITING) are marked CANCELLED so the worker
    can stop them; tasks in any terminal state are deleted outright.

    Raises 404 when the workflow does not exist or is not owned by the caller.
    """
    workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
    if not workflow:
        raise HTTPException(status_code=404, detail="任务不存在")

    # Fix: enforce ownership. Previously current_user was injected but never
    # checked, so any authenticated user could cancel or delete another
    # user's workflow. Respond 404 (not 403) to avoid leaking existence.
    if workflow.user_id != current_user.id:
        raise HTTPException(status_code=404, detail="任务不存在")

    if workflow.status in [TaskStatus.RUNNING, TaskStatus.PENDING, TaskStatus.WAITING]:
        # Active tasks cannot be hard-deleted safely; mark as cancelled.
        ai_workflow.update_workflow_status(
            db=db, workflow_id=workflow_id,
            status=TaskStatus.CANCELLED
        )
        return {"code": 200, "msg": "任务已取消"}

    # Terminal states (completed, cancelled, failed, ...) are deleted directly.
    ai_workflow.delete_workflow(db=db, workflow_id=workflow_id)
    return {"code": 200, "msg": "任务已删除"}


# 添加依赖注入
def get_cos_service() -> TencentCOSService:
    """Dependency provider: construct a fresh Tencent COS service per request."""
    service = TencentCOSService()
    return service

def get_image_processor() -> ImageProcessor:
    """Dependency provider: construct a fresh image processor per request."""
    processor = ImageProcessor()
    return processor

def _fetch_all_workflow_images(db: Session, workflow_id: int, page_size: int = 1000):
    """Fetch every image attached to a workflow by paging through the group,
    so a default query limit never truncates the result set."""
    from app.crud.image import image_crud

    group_id = f'workflow_{workflow_id}'
    collected = []
    offset = 0
    while True:
        page = image_crud.get_images_by_group(
            db=db,
            group_id=group_id,
            skip=offset,
            limit=page_size
        )
        collected.extend(page)
        # A short (or empty) page means the group is exhausted.
        if len(page) < page_size:
            return collected
        offset += page_size

@router.post("/process-image", response_model=BatchImageProcessResponse)
async def process_image(
    *,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
    background_tasks: BackgroundTasks,
    request: BatchImageProcessRequest = Body(...),
    cos_service: TencentCOSService = Depends(get_cos_service),
    processor: ImageProcessor = Depends(get_image_processor)
):
    """AI image-processing workflow endpoint — spawns one independent workflow
    task per input image and enqueues each on the shared global queue.

    NOTE(review): background_tasks, cos_service and processor are injected but
    never used in this handler — confirm before removing.
    """
    
    # The request carries the image URLs directly (no upload step here).
    image_urls = request.image_path
    
    # Reject an empty URL list up front.
    if not image_urls:
        raise HTTPException(status_code=400, detail="图片URL列表不能为空")
    
    # Every entry must be a non-empty string.
    for url in image_urls:
        if not url or not isinstance(url, str):
            raise HTTPException(status_code=400, detail="图片URL格式无效")
    
    # Guard rails on batch size and fan-out count.
    if len(image_urls) > 100:
        raise HTTPException(status_code=400, detail="最多支持100张图片")
    
    if request.variation_count < 1 or request.variation_count > 10:
        raise HTTPException(status_code=400, detail="裂变数量必须在1-10之间")
    
    try:
        # Create one independent workflow task per image.
        workflow_ids = []
        manager = get_batch_queue_manager(max_concurrent_workflows=5)
        for i, image_url in enumerate(image_urls):
            # Per-task display name: "<batch name>_<1-based index>_<timestamp>".
            task_name = f"{request.batch_name or '图片处理'}_{i+1}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
            
            # Workflow payload covering exactly this one image.
            workflow_data = AIWorkflowCreate(
                task_name=task_name,
                input_data={
                    "image_url": image_url,  # single image URL
                    "user_prompt": request.prompt,
                    "variation_count": request.variation_count,
                    "total_images": 1,  # each task processes exactly one image
                    "processing_mode": "queue_batch"  # marks queue-batch mode
                }
            )
            
            workflow = ai_workflow.create_workflow(
                db=db,
                obj_in=workflow_data,
                user_id=current_user.id
            )
            
            workflow_ids.append(workflow.id)
            manager.enqueue_new_workflow(workflow.id, image_url, request.prompt, db)
        
        # Response payload.
        response_data = {
            "workflow_ids": workflow_ids,  # all created workflow IDs
            "total_images": len(image_urls),
            "status": "queued",  # queued, not yet running
            "max_concurrent": 5
        }
        
        return BatchImageProcessResponse(
            code=200,
            msg=f"成功创建 {len(workflow_ids)} 个任务，已加入全局队列等待执行",
            data=response_data
        )
        
    except Exception as e:
        # NOTE(review): broad catch also masks programming errors, and rows
        # created before a mid-loop failure are not rolled back here.
        raise HTTPException(status_code=500, detail=f"任务创建失败: {str(e)}")

@router.post("/process-image-batch-queue", response_model=BatchImageProcessResponse)
async def process_image_batch_queue(
    *,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
    background_tasks: BackgroundTasks,
    request: BatchImageProcessRequest = Body(...),
    batch_size: int = 10,  # images per batch
    max_concurrent_batches: int = 3  # max concurrent workflows — NOTE(review): original comment claimed "5"; default is 3, confirm intended
):
    """Batched-queue AI workflow endpoint for large image sets.

    Creates one parent workflow tracking the whole batch plus one child
    workflow per image; children go onto the shared global queue, which
    enforces the concurrency limit.

    NOTE(review): background_tasks is injected but unused, and the response
    hard-codes "max_concurrent": 5 regardless of max_concurrent_batches.
    """
    
    # The request carries the image URLs directly.
    image_urls = request.image_path
    
    # Reject an empty URL list.
    if not image_urls:
        raise HTTPException(status_code=400, detail="图片URL列表不能为空")
    
    # Every entry must be a non-empty string.
    for url in image_urls:
        if not url or not isinstance(url, str):
            raise HTTPException(status_code=400, detail="图片URL格式无效")
    
    # Input limits.
    if len(image_urls) > 1000:  # this endpoint supports larger batches
        raise HTTPException(status_code=400, detail="最多支持1000张图片")
    
    if request.variation_count < 1 or request.variation_count > 10:
        raise HTTPException(status_code=400, detail="裂变数量必须在1-10之间")
    
    if batch_size < 1 or batch_size > 50:
        raise HTTPException(status_code=400, detail="批次大小必须在1-50之间")
    
    if max_concurrent_batches < 1 or max_concurrent_batches > 5:
        raise HTTPException(status_code=400, detail="最大并发批次数必须在1-5之间")
    
    try:
        # Create the parent workflow that records the batch as a whole.
        task_name = f"{request.batch_name or '批量图片处理'}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        
        workflow_data = AIWorkflowCreate(
            task_name=task_name,
            input_data={
                "image_urls": image_urls,
                "user_prompt": request.prompt,
                "variation_count": request.variation_count,
                "total_images": len(image_urls),
                "batch_size": batch_size,
                "max_concurrent_batches": max_concurrent_batches,
                "processing_mode": "batch_queue_global"
            }
        )
        
        workflow = ai_workflow.create_workflow(
            db=db,
            obj_in=workflow_data,
            user_id=current_user.id
        )
        
        # Split the batch into one child workflow per image and push each
        # onto the global queue.
        # The queue manager's concurrency/batch settings come from the request.
        manager = get_batch_queue_manager(max_concurrent_workflows=max_concurrent_batches, batch_size=batch_size)
        child_workflow_ids = []
        for i, image_url in enumerate(image_urls):
            child_task_name = f"{request.batch_name or '批量图片处理-子任务'}_{i+1}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
            child_data = AIWorkflowCreate(
                task_name=child_task_name,
                input_data={
                    "image_url": image_url,
                    "user_prompt": request.prompt,
                    "variation_count": request.variation_count,
                    "total_images": 1,
                    "processing_mode": "queue_batch_child",
                    "parent_workflow_id": workflow.id
                }
            )
            child = ai_workflow.create_workflow(
                db=db,
                obj_in=child_data,
                user_id=current_user.id
            )
            child_workflow_ids.append(child.id)
            manager.enqueue_new_workflow(child.id, image_url, request.prompt, db)
        
        # Response payload; total_batches is a ceiling division.
        total_batches = (len(image_urls) + batch_size - 1) // batch_size
        response_data = {
            "workflow_id": workflow.id,
            "workflow_ids": child_workflow_ids,
            "total_images": len(image_urls),
            "batch_size": batch_size,
            "total_batches": total_batches,
            "max_concurrent_batches": max_concurrent_batches,
            "status": "queued",
            "processing_mode": "batch_queue_global",
            "max_concurrent": 5
        }
        
        return BatchImageProcessResponse(
            code=200,
            msg=f"成功创建 {len(child_workflow_ids)} 个子任务，已加入全局队列等待执行",
            data=response_data
        )
        
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"分批队列任务创建失败: {str(e)}")

@router.get("/status/{workflow_id}")
def get_processing_status(
    workflow_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Return the processing status of a workflow (batch-aware).

    For global-queue parent workflows, child-task progress is aggregated;
    for workflows whose cached result fields are empty, they are rebuilt
    from the images table.

    NOTE(review): although this is a GET endpoint, it persists aggregated
    status/progress and rebuilt result_data back to the database as a side
    effect, and performs no ownership check against current_user — confirm
    both are intentional.
    """
    workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
    if not workflow:
        raise HTTPException(status_code=404, detail="任务不存在")
    
    # Base status fields common to all task kinds.
    status_data = {
        "workflow_id": workflow.id,
        "status": workflow.status,
        "progress": workflow.progress,
        "current_step": workflow.current_step,
        "error": workflow.error_message,
        "created_at": workflow.created_at,
        "updated_at": workflow.updated_at
    }
    
    # Batch tasks carry extra per-batch information.
    input_data = workflow.input_data or {}
    result_data = workflow.result_data or {}  # initialize result_data
    
    if "image_urls" in input_data:  # batch processing task
        total_images = input_data.get("total_images", len(input_data.get("image_urls", [])))
        image_task_id = input_data.get("image_task_id")
        
        # Attach batch-specific metadata.
        status_data.update({
            "batch_info": {
                "total_images": total_images,
                "image_task_id": image_task_id,
                "variation_count": input_data.get("variation_count", 1)
            }
        })

        # Parent task aggregates child progress (global-queue parents only).
        if input_data.get("processing_mode") == "batch_queue_global" and not input_data.get("parent_workflow_id"):
            try:
                # Children reference this parent via input_data.parent_workflow_id
                # (JSON_EXTRACT on the JSON column; value compared as string).
                children = db.query(AIWorkflow).filter(
                    func.JSON_UNQUOTE(func.JSON_EXTRACT(AIWorkflow.input_data, '$.parent_workflow_id')) == str(workflow.id)
                ).all()
                total_children = len(children)
                if total_children > 0:
                    completed = sum(1 for c in children if c.status == TaskStatus.COMPLETED)
                    running = sum(1 for c in children if c.status == TaskStatus.RUNNING)
                    failed = sum(1 for c in children if c.status == TaskStatus.FAILED)
                    waiting = sum(1 for c in children if c.status in [TaskStatus.PENDING, TaskStatus.WAITING])

                    # Aggregate progress = fraction of completed children.
                    agg_progress = int(completed / total_children * 100)

                    # Derive the parent status from the child outcomes.
                    if completed + failed == total_children:
                        parent_status = TaskStatus.FAILED if failed > 0 else TaskStatus.COMPLETED
                    elif running > 0:
                        parent_status = TaskStatus.RUNNING
                    else:
                        parent_status = TaskStatus.WAITING

                    current_step = f"子任务进度 完成:{completed} 运行:{running} 等待:{waiting} 失败:{failed} / 共{total_children}"

                    # Persist the aggregated status and progress on the parent.
                    ai_workflow.update_workflow_status(
                        db=db,
                        workflow_id=workflow.id,
                        status=parent_status,
                        current_step=current_step,
                        progress=agg_progress
                    )

                    # Refresh batch_info stats in the response and mirror
                    # them into result_data.
                    batch_info = status_data.get("batch_info", {})
                    batch_info.update({
                        "total_children": total_children,
                        "processed_images": completed,
                        "children_summary": {
                            "completed": completed,
                            "running": running,
                            "waiting": waiting,
                            "failed": failed
                        }
                    })
                    status_data["batch_info"] = batch_info

                    # Merge batch_info into the stored result_data.
                    rd = workflow.result_data or {}
                    rd.setdefault("batch_info", {})
                    rd["batch_info"].update(batch_info)
                    ai_workflow.update_workflow_result(db=db, workflow_id=workflow.id, result_data=rd)
            except Exception as e:
                # Aggregation failures must not break the status query.
                print(f"aggregate children status error: {e}")
        
        # With result data available, expose batch statistics.
        if result_data:
            # Collect per-image processing results.
            processed_images = result_data.get("processed_images", [])
            
            # Fallback for legacy records: read root-level fields instead.
            if not processed_images:
                # If those fields are empty too, rebuild them from the images table.
                total_mj_images = len(result_data.get("mj_image_urls", []))
                total_split_images = len(result_data.get("split_images", []))
                total_flux_variations = len(result_data.get("flux_variations", []))
                
                if total_mj_images == 0 or total_split_images == 0 or total_flux_variations == 0:
                    import json
                    
                    # Page through the full image set to avoid truncation.
                    # Fetch every image belonging to this workflow.
                    workflow_images = _fetch_all_workflow_images(db, workflow_id)
                    # Rebuild only the fields that are currently empty.
                    mj_images = result_data.get("mj_image_urls", []) if total_mj_images > 0 else []
                    split_images = result_data.get("split_images", []) if total_split_images > 0 else []
                    flux_variations = result_data.get("flux_variations", []) if total_flux_variations > 0 else []
                    
                    for i, img in enumerate(workflow_images):
                        if img.tencent_response:
                            try:
                                response_data = json.loads(img.tencent_response)
                                source = response_data.get('source', '')
                                
                                if source == 'mj_generation' and total_mj_images == 0:
                                    mj_images.append(img.cos_url)
                                elif source == 'mj_split' and total_split_images == 0:
                                    split_images.append({
                                        'id': img.id,
                                        'entity_id': img.entity_id,
                                        'url': img.cos_url,
                                        'file_name': img.pic_name,
                                        'split_index': response_data.get('split_index', 0)
                                    })
                                elif source == 'flux_variation' and total_flux_variations == 0:
                                    flux_variations.append({
                                        'id': img.id,
                                        'entity_id': img.entity_id,
                                        'url': img.cos_url,
                                        'file_name': img.pic_name,
                                        'source_image_index': response_data.get('source_image_index', 0),
                                        'variation_index': response_data.get('variation_index', 0)
                                    })
                            except Exception as e:
                                print(f"❌ JSON解析失败: {str(e)}")
                    
                    # Write back only the fields that were empty.
                    updated = False
                    if total_mj_images == 0 and mj_images:
                        result_data["mj_image_urls"] = mj_images
                        updated = True
                    if total_split_images == 0 and split_images:
                        result_data["split_images"] = split_images
                        updated = True
                    if total_flux_variations == 0 and flux_variations:
                        result_data["flux_variations"] = flux_variations
                        updated = True
                    
                    # Persist the rebuilt result_data.
                    if updated:
                        ai_workflow.update_workflow_result(db=db, workflow_id=workflow_id, result_data=result_data)
    else:  # single-image task (backward compatible)
        status_data["single_image_info"] = {
            "image_url": input_data.get("image_url"),
            "variation_count": input_data.get("variation_count", 1)
        }
    
    # Attach the full (possibly rebuilt) result data.
    status_data["result"] = result_data
    
    return {
        "code": 200,
        "data": status_data
    }


@router.post("/resume/{workflow_id}")
def resume_workflow(
    workflow_id: int,
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Resume a workflow from its last checkpoint.

    Batch-package tasks re-enqueue every FAILED sub-workflow. Single-image
    tasks first reconcile expected vs actual outputs (rebuilding result
    fields from the images table when empty), then either report completion
    or re-enqueue on the global queue.

    NOTE(review): background_tasks is injected but unused, and there is no
    ownership check against current_user — confirm intended.
    """
    workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
    if not workflow:
        raise HTTPException(status_code=404, detail="工作流任务未找到")
    
    # Branch on whether this is a batch-package task.
    input_data = workflow.input_data or {}
    if input_data.get("batch_package_id"):  # batch-package task
        # Collect every sub-workflow belonging to the package.
        batch_workflows = ai_workflow.get_workflows_by_batch_package(
            db=db, batch_package_id=input_data["batch_package_id"]
        )
        
        # Enqueue each FAILED sub-workflow on the global queue for resume.
        resumed_count = 0
        manager = get_batch_queue_manager(max_concurrent_workflows=5)
        for sub_workflow in batch_workflows:
            if sub_workflow.status == TaskStatus.FAILED:
                manager.enqueue_resume(sub_workflow.id, db)
                resumed_count += 1
        
        return {
            "code": 200,
            "msg": f"批量断点续传已入队，共{resumed_count}个子任务",
            "data": {
                "batch_workflow_id": workflow_id,
                "resumed_sub_tasks": resumed_count
            }
        }
    else:  # single-image task (original logic)
        # Guard: resuming while running risks duplicate generation.
        if workflow.status == TaskStatus.RUNNING:
            return {
                "code": 200,
                "msg": "当前任务正在运行中，无需续传",
                "data": {
                    "workflow_id": workflow_id,
                    "status": "running"
                }
            }
        # Load the task configuration and stored output expectations.
        task_config = workflow.task_config or {}
        expected_outputs = workflow.expected_outputs or {}
        result_data = workflow.result_data or {}
        
        # Without stored expectations, derive them from variation_count.
        if not expected_outputs:
            variation_count = input_data.get('variation_count', 1)  # fan-out per split image
            prompt_count = 10  # always generates 10 optimized prompts
            mj_count = prompt_count  # one MJ image per prompt
            split_count = mj_count * 4  # each MJ image splits into 4
            flux_count = split_count * variation_count  # variations per split image
            
            expected_outputs = {
                "optimized_prompts": prompt_count,  # 10 optimized prompts
                "mj_images": mj_count,  # 10 images
                "split_images": split_count,  # 40 images
                "flux_variations": flux_count,  # 40 at variation_count=1
                "product_info_items": flux_count,  # one entry per variation
                "detail_pages": flux_count  # one detail page per variation
            }
            
            # Persist the derived expectations on the workflow.
            ai_workflow.update_workflow_expected_outputs(
                db=db, workflow_id=workflow_id, expected_outputs=expected_outputs
            )
        
        # Same counting logic as the completion-details endpoint:
        # refresh image-derived fields from the database first.
        total_mj_images = len(result_data.get("mj_image_urls", []))
        total_split_images = len(result_data.get("split_images", []))
        total_flux_variations = len(result_data.get("flux_variations", []))
        
        if total_mj_images == 0 or total_split_images == 0 or total_flux_variations == 0:
            from app.crud.image import image_crud
            import json
            
            print(f"🔍 resume接口：开始从数据库重新获取工作流 {workflow_id} 的图片数据")
            
            # Fetch every image belonging to this workflow (paged).
            workflow_images = _fetch_all_workflow_images(db=db, workflow_id=workflow_id)
            
            # Rebuild the result fields from each image's tencent_response.
            mj_images = []
            split_images = []
            flux_variations = []
            
            for img in workflow_images:
                try:
                    if img.tencent_response:
                        tencent_data = json.loads(img.tencent_response) if isinstance(img.tencent_response, str) else img.tencent_response
                        source = tencent_data.get('source', '')
                        
                        if source == 'mj_generation':
                            mj_images.append(img.cos_url)
                        elif source == 'mj_split':
                            split_images.append({
                                'id': img.id,
                                'entity_id': img.entity_id,
                                'url': img.cos_url,
                                'file_name': img.pic_name,
                                'split_index': tencent_data.get('split_index', 0)
                            })
                        elif source == 'flux_variation':
                            flux_variations.append({
                                'id': img.id,
                                'entity_id': img.entity_id,
                                'url': img.cos_url,
                                'file_name': img.pic_name,
                                'source_image_index': tencent_data.get('source_image_index', 0),
                                'variation_index': tencent_data.get('variation_index', 0)
                            })
                except Exception as e:
                    print(f"❌ 解析图片 {img.id} 的tencent_response失败: {str(e)}")
                    continue
            
            # Refresh result_data with whatever was rebuilt.
            if mj_images:
                result_data["mj_image_urls"] = mj_images
            if split_images:
                result_data["split_images"] = split_images
            if flux_variations:
                result_data["flux_variations"] = flux_variations
            
            # Persist the rebuilt result_data.
            if mj_images or split_images or flux_variations:
                ai_workflow.update_workflow_result(
                    db=db, workflow_id=workflow_id, result_data=result_data
                )
                # Re-read to pick up the freshly stored result_data.
                workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
                result_data = workflow.result_data or {}
        
        # Re-check completion against the refreshed result_data.
        completion_status = check_workflow_completion(result_data, expected_outputs, workflow_id=workflow_id, db=db)
        
        if completion_status["is_complete"]:
            return {
                "code": 200,
                "msg": "工作流已完成",
                "data": {
                    "workflow_id": workflow_id,
                    "status": "completed",
                    "completion_details": completion_status
                }
            }
        
        # Not complete yet: enqueue the resume on the global queue.
        manager = get_batch_queue_manager(max_concurrent_workflows=5)
        manager.enqueue_resume(workflow_id, db)
        
        return {
            "code": 200,
            "msg": "断点续传已加入队列",
            "data": {
                "workflow_id": workflow_id,
                "missing_outputs": completion_status["missing_outputs"],
                "completion_rate": completion_status["completion_rate"],
                "expected_outputs": expected_outputs
            }
        }


@router.post("/retry/{workflow_id}/{step_name}")
def retry_single_step(
    workflow_id: int,
    step_name: str,
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Retry a single step of a workflow.

    Batch-package tasks retry the step on every sub-workflow; single-image
    tasks retry the step on the workflow itself unless it is running.
    """
    workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
    if not workflow:
        raise HTTPException(status_code=404, detail="工作流任务未找到")
    
    # Branch on whether this is a batch-package task.
    input_data = workflow.input_data or {}
    if input_data.get("batch_package_id"):  # batch-package task
        # Collect every sub-workflow belonging to the package.
        batch_workflows = ai_workflow.get_workflows_by_batch_package(
            db=db, batch_package_id=input_data["batch_package_id"]
        )
        
        # Schedule the step retry on each sub-workflow.
        retried_count = 0
        for sub_workflow in batch_workflows:
            background_tasks.add_task(retry_step_background, sub_workflow.id, step_name, db)
            retried_count += 1
        
        return {
            "code": 200,
            "msg": f"批量重试步骤 {step_name} 已启动，共{retried_count}个子任务",
            "data": {
                "batch_workflow_id": workflow_id,
                "step_name": step_name,
                "retried_sub_tasks": retried_count
            }
        }
    else:  # single-image task (original logic)
        # Guard: retrying while running could duplicate generated output.
        if workflow.status == TaskStatus.RUNNING:
            return {
                "code": 200,
                "msg": "当前任务正在运行中，为避免重复生成暂不支持运行时重试",
                "data": {
                    "workflow_id": workflow_id,
                    "status": "running"
                }
            }
        
        # Fix: the original fell through here with no statement, so the
        # endpoint returned None (HTTP body "null") and never retried the
        # step. Schedule the retry and report it, mirroring the batch branch.
        background_tasks.add_task(retry_step_background, workflow.id, step_name, db)
        return {
            "code": 200,
            "msg": f"重试步骤 {step_name} 已启动",
            "data": {
                "workflow_id": workflow_id,
                "step_name": step_name
            }
        }


def check_workflow_completion(result_data: dict, expected_outputs: dict, workflow_id: int = None, db: Session = None) -> dict:
    """检查工作流完成情况"""
    completion_details = {}
    missing_outputs = []
    total_expected = 0
    total_actual = 0
    
    # 🔥 新增：如果字段为空且提供了workflow_id和db，从images表中重新获取
    if workflow_id and db:
        total_mj_images = len(result_data.get("mj_image_urls", []))
        total_split_images = len(result_data.get("split_images", []))
        total_flux_variations = len(result_data.get("flux_variations", []))
        
        if total_mj_images == 0 or total_split_images == 0 or total_flux_variations == 0:
            from app.crud.image import image_crud
            import json
            
            print(f"🔍 check_workflow_completion：开始从数据库重新获取工作流 {workflow_id} 的图片数据")
            print(f"🔍 当前统计: MJ={total_mj_images}, Split={total_split_images}, Flux={total_flux_variations}")
            
            # 获取该工作流的所有图片
            workflow_images = _fetch_all_workflow_images(db=db, workflow_id=workflow_id)
            print(f"🔍 从数据库查询到 {len(workflow_images)} 张图片")
            
            # 重新构建字段数据
            mj_images = []
            split_images = []
            flux_variations = []
            
            for img in workflow_images:
                try:
                    if img.tencent_response:
                        tencent_data = json.loads(img.tencent_response) if isinstance(img.tencent_response, str) else img.tencent_response
                        source = tencent_data.get('source', '')
                        
                        if source == 'mj_generation':
                            mj_images.append(img.cos_url)
                        elif source == 'mj_split':
                            split_images.append({
                                'id': img.id,
                                'entity_id': img.entity_id,
                                'url': img.cos_url,
                                'file_name': img.pic_name,
                                'split_index': tencent_data.get('split_index', 0)
                            })
                        elif source == 'flux_variation':
                            flux_variations.append({
                                'id': img.id,
                                'entity_id': img.entity_id,
                                'url': img.cos_url,
                                'file_name': img.pic_name,
                                'source_image_index': tencent_data.get('source_image_index', 0),
                                'variation_index': tencent_data.get('variation_index', 0)
                            })
                except Exception as e:
                    print(f"❌ 解析图片 {img.id} 的tencent_response失败: {str(e)}")
                    continue
            
            # 更新result_data
            if mj_images:
                result_data["mj_image_urls"] = mj_images
            if split_images:
                result_data["split_images"] = split_images
            if flux_variations:
                result_data["flux_variations"] = flux_variations
            
            print(f"🔍 重新获取后统计: MJ={len(mj_images)}, Split={len(split_images)}, Flux={len(flux_variations)}")
    
    for output_type, expected_count in expected_outputs.items():
        actual_count = 0
        
        if output_type == "optimized_prompts":
            # 批量处理：优先从全局字段统计，再从 processed_images 统计
            processed_images = result_data.get("processed_images", [])
            
            # 先尝试从全局字段获取（增量保存的结果）
            global_optimized_prompt = result_data.get("optimized_prompt")
            if global_optimized_prompt:
                # 修复：根据实际图片数量计算，而不是固定为10
                image_count = len(processed_images) if processed_images else 1
                actual_count = 10 * image_count  # 每张图片10个提示词
            # 如果全局字段为空，再从 processed_images 统计
            elif processed_images:
                for img_result in processed_images:
                    optimized_prompt = img_result.get("optimized_prompt", "")
                    if optimized_prompt:
                        actual_count += 10  # 每张图片固定10组
        elif output_type == "mj_images":
            # 批量处理：优先从全局字段统计，再从 processed_images 统计
            processed_images = result_data.get("processed_images", [])
            
            # 先尝试从全局字段获取（增量保存的结果）
            global_mj_urls = result_data.get("mj_image_urls", [])
            actual_count = len(global_mj_urls)
            
            # 如果全局字段为空，再从 processed_images 统计
            if actual_count == 0 and processed_images:
                for img_result in processed_images:
                    mj_urls = img_result.get("mj_image_urls", [])
                    actual_count += len(mj_urls)
        elif output_type == "split_images":
            # 批量处理：优先从全局字段统计，再从 processed_images 统计
            processed_images = result_data.get("processed_images", [])
            
            # 先尝试从全局字段获取（增量保存的结果）
            global_split_images = result_data.get("split_images", [])
            actual_count = len(global_split_images)
            
            # 如果全局字段为空，再从 processed_images 统计
            if actual_count == 0 and processed_images:
                for img_result in processed_images:
                    split_images = img_result.get("split_images", [])
                    actual_count += len(split_images)
        elif output_type == "flux_variations":
            # 批量处理：优先从全局字段统计，再从 processed_images 统计
            processed_images = result_data.get("processed_images", [])
            
            # 先尝试从全局字段获取（增量保存的结果）
            global_flux_vars = result_data.get("flux_variations", [])
            actual_count = len(global_flux_vars)
            
            # 如果全局字段为空，再从 processed_images 统计
            if actual_count == 0 and processed_images:
                for img_result in processed_images:
                    flux_vars = img_result.get("flux_variations", [])
                    actual_count += len(flux_vars)
            elif actual_count == 0:
                # 单图处理：原逻辑
                flux_vars = result_data.get("flux_variations") or []
                actual_count = len(flux_vars)
        elif output_type == "product_info_items":
            # 批量处理：优先从全局字段统计，再从 processed_images 统计
            processed_images = result_data.get("processed_images", [])
            
            # 先尝试从全局字段获取（增量保存的结果）
            global_product_info = result_data.get("product_info", [])
            if global_product_info:
                if isinstance(global_product_info, list):
                    actual_count = len(global_product_info)
                elif isinstance(global_product_info, dict) and any(v for v in global_product_info.values() if v):
                    actual_count = 1
            # 如果全局字段为空，再从 processed_images 统计
            elif processed_images:
                for img_result in processed_images:
                    product_info = img_result.get("product_info")
                    if product_info:
                        if isinstance(product_info, list):
                            actual_count += len(product_info)
                        elif isinstance(product_info, dict) and any(v for v in product_info.values() if v):
                            actual_count += 1
        elif output_type == "detail_pages":
            # 批量处理：优先从全局字段统计，再从 processed_images 统计
            processed_images = result_data.get("processed_images", [])
            
            # 先尝试从全局字段获取（增量保存的结果）
            global_detail_pages = result_data.get("detail_pages", [])
            if global_detail_pages:
                if isinstance(global_detail_pages, list):
                    actual_count = len(global_detail_pages)
                elif isinstance(global_detail_pages, dict) and any(v for v in global_detail_pages.values() if v):
                    actual_count = 1
            # 如果全局字段为空，再从 processed_images 统计
            elif processed_images:
                for img_result in processed_images:
                    detail_pages = img_result.get("detail_pages")
                    if detail_pages:
                        if isinstance(detail_pages, list):
                            actual_count += len(detail_pages)
                        elif isinstance(detail_pages, dict) and any(v for v in detail_pages.values() if v):
                            actual_count += 1
        
        completion_details[output_type] = {
            "expected": expected_count,
            "actual": actual_count,
            "complete": actual_count >= expected_count
        }
        
        if actual_count < expected_count:
            missing_outputs.append({
                "type": output_type,
                "expected": expected_count,
                "actual": actual_count,
                "missing": expected_count - actual_count
            })
        
        total_expected += expected_count
        total_actual += actual_count
    
    completion_rate = (total_actual / total_expected * 100) if total_expected > 0 else 0
    
    return {
        "is_complete": len(missing_outputs) == 0,
        "completion_rate": round(completion_rate, 2),
        "completion_details": completion_details,
        "missing_outputs": missing_outputs,
        "total_expected": total_expected,
        "total_actual": total_actual
    }


# 添加后台执行函数
def resume_workflow_background(workflow_id: int, db: Session):
    """后台执行断点续传

    Schedules an async resume of the workflow. The `db` argument is not used
    directly; the background coroutine opens its own SessionLocal session so it
    does not depend on the request-scoped session's lifetime.
    """
    print(f"\n=== 开始执行断点续传 ===\nWorkflow ID: {workflow_id}\n时间: {datetime.now()}")

    async def _do_resume():
        """Run the resume flow on a fresh DB session; mark FAILED on any error."""
        try:
            from app.services.ai.workflow_processor import WorkflowProcessor
            session = SessionLocal()
            try:
                print(f"[断点续传] 正在更新工作流状态为运行中...")
                ai_workflow.update_workflow_status(
                    db=session, workflow_id=workflow_id,
                    status=TaskStatus.RUNNING,
                    current_step="resume_processing"
                )
                print(f"[断点续传] 状态更新完成")

                print(f"[断点续传] 正在获取工作流信息...")
                workflow = ai_workflow.get_workflow(db=session, workflow_id=workflow_id)
                if not workflow:
                    print(f"[断点续传] 错误: 未找到工作流 {workflow_id}")
                    return

                print(f"[断点续传] 工作流信息获取成功")
                print(f"[断点续传] 工作流名称: {workflow.task_name}")
                print(f"[断点续传] 当前状态: {workflow.status}")
                print(f"[断点续传] 当前进度: {workflow.progress}%")

                print(f"[断点续传] 正在创建WorkflowProcessor...")
                # 异步上下文管理器保证处理器资源在续传结束后释放
                async with WorkflowProcessor(db=session) as processor:
                    print(f"[断点续传] WorkflowProcessor创建成功，开始执行续传...")
                    await processor.resume_workflow(workflow_id)

                print(f"[断点续传] ✅ 断点续传执行成功！")
                ai_workflow.update_workflow_status(
                    db=session, workflow_id=workflow_id,
                    status=TaskStatus.COMPLETED,
                    progress=100
                )
                print(f"[断点续传] 工作流状态已更新为完成")
            finally:
                session.close()
        except Exception as exc:
            print(f"[断点续传] ❌ 发生异常: {str(exc)}")
            print(f"[断点续传] 异常详情: {type(exc).__name__}")
            print(f"[断点续传] 堆栈跟踪:\n{traceback.format_exc()}")
            fail_db = SessionLocal()
            try:
                ai_workflow.update_workflow_status(
                    db=fail_db, workflow_id=workflow_id,
                    status=TaskStatus.FAILED,
                    error_message=f"断点续传异常: {str(exc)}"
                )
                print(f"[断点续传] 工作流状态已更新为失败")
            finally:
                fail_db.close()

    # 有运行中的事件循环就直接挂任务；否则放到独立线程里用 asyncio.run 执行
    try:
        asyncio.get_running_loop().create_task(_do_resume())
    except RuntimeError:
        threading.Thread(target=asyncio.run, args=(_do_resume(),), daemon=True).start()

    print(f"\n=== 断点续传执行结束 ===\n时间: {datetime.now()}\n")


def retry_step_background(workflow_id: int, step_name: str, db: Session):
    """后台执行单步重试

    Schedules an async retry of one pipeline step. The `db` argument is not
    used directly; the coroutine opens its own SessionLocal session.
    """
    async def _do_retry():
        """Run the single-step retry on a fresh DB session."""
        try:
            from app.services.ai.workflow_processor import WorkflowProcessor
            session = SessionLocal()
            try:
                # 标记工作流进入该步骤的重试状态
                ai_workflow.update_workflow_status(
                    db=session, workflow_id=workflow_id,
                    status=TaskStatus.RUNNING,
                    current_step=f"retry_{step_name}"
                )

                # 异步上下文管理器保证处理器资源释放
                async with WorkflowProcessor(db=session) as processor:
                    ok = await processor.retry_single_step(workflow_id, step_name)

                if ok:
                    ai_workflow.update_workflow_status(
                        db=session, workflow_id=workflow_id,
                        status=TaskStatus.RUNNING,
                        current_step="processing"
                    )
                else:
                    ai_workflow.update_workflow_status(
                        db=session, workflow_id=workflow_id,
                        status=TaskStatus.FAILED,
                        error_message=f"步骤 {step_name} 重试失败"
                    )
            finally:
                session.close()
        except Exception as exc:
            fail_db = SessionLocal()
            try:
                ai_workflow.update_workflow_status(
                    db=fail_db, workflow_id=workflow_id,
                    status=TaskStatus.FAILED,
                    error_message=f"步骤重试异常: {str(exc)}"
                )
            finally:
                fail_db.close()

    # 有运行中的事件循环就直接挂任务；否则放到独立线程里执行
    try:
        asyncio.get_running_loop().create_task(_do_retry())
    except RuntimeError:
        threading.Thread(target=asyncio.run, args=(_do_retry(),), daemon=True).start()


def process_workflow_pipeline(workflow_id: int, image_url: str, user_prompt: str, db: Session):
    """执行完整的AI工作流管道

    Schedules the full pipeline for one image URL. The `db` argument is not
    used directly; the coroutine opens its own SessionLocal session.
    """
    async def _run_pipeline():
        session = SessionLocal()
        try:
            # 异步上下文管理器保证处理器资源释放；传递远程URL而非本地路径
            async with WorkflowProcessor(session) as processor:
                await processor.execute_full_pipeline(workflow_id, image_url, user_prompt)
        finally:
            session.close()

    # 有运行中的事件循环就直接挂任务；否则放到独立线程里执行
    try:
        asyncio.get_running_loop().create_task(_run_pipeline())
    except RuntimeError:
        threading.Thread(target=asyncio.run, args=(_run_pipeline(),), daemon=True).start()


def process_single_workflow_pipeline(workflow_id: int, image_url: str, user_prompt: str, variation_count: int, db: Session):
    """执行单张图片的AI工作流管道

    NOTE(review): `variation_count` and `db` are accepted but not used here —
    the pipeline call only forwards the URL and prompt; confirm whether the
    processor reads variation_count from the workflow record instead.
    """
    async def _run_single():
        session = SessionLocal()
        try:
            # 异步上下文管理器保证处理器资源释放
            async with WorkflowProcessor(session) as processor:
                await processor.execute_full_pipeline(workflow_id, image_url, user_prompt)
        finally:
            session.close()

    # 有运行中的事件循环就直接挂任务；否则放到独立线程里执行
    try:
        asyncio.get_running_loop().create_task(_run_single())
    except RuntimeError:
        threading.Thread(target=asyncio.run, args=(_run_single(),), daemon=True).start()

def process_batch_workflow_pipeline(workflow_id: int, image_urls: List[str], user_prompt: str, variation_count: int, db: Session):
    """执行批量图片的AI工作流管道

    Schedules the batch pipeline for several image URLs under one workflow.
    The `db` argument is not used directly; the coroutine opens its own
    SessionLocal session.
    """
    async def _run_batch():
        session = SessionLocal()
        try:
            # 异步上下文管理器保证处理器资源释放
            async with WorkflowProcessor(session) as processor:
                await processor.execute_batch_pipeline(workflow_id, image_urls, user_prompt, variation_count)
        finally:
            session.close()

    # 有运行中的事件循环就直接挂任务；否则放到独立线程里执行
    try:
        asyncio.get_running_loop().create_task(_run_batch())
    except RuntimeError:
        threading.Thread(target=asyncio.run, args=(_run_batch(),), daemon=True).start()

def process_queue_batch_pipeline(workflow_ids: List[int], image_urls: List[str], user_prompt: str, variation_count: int, max_concurrent: int, db: Session):
    """执行队列批处理的AI工作流管道

    Fans out one pipeline per (workflow_id, image_url) pair, limiting
    concurrency to `max_concurrent` via a semaphore. The `db` argument is not
    used directly; the background coroutine opens its own SessionLocal session.
    `variation_count` is accepted but not forwarded — TODO confirm whether the
    processor reads it from the workflow record.
    """
    async def async_queue_batch_process():
        """异步执行队列批处理工作流管道的内部函数"""
        # asyncio 已在模块顶部导入，无需函数内重复导入
        local_db = SessionLocal()
        try:
            print(f"🚀 启动队列批处理: {len(workflow_ids)}个任务，最大并发{max_concurrent}个")

            # 创建信号量控制并发数
            semaphore = asyncio.Semaphore(max_concurrent)

            async def process_single_task(workflow_id: int, image_url: str):
                """处理单个任务：标记运行中 -> 执行管道 -> 失败则标记失败"""
                async with semaphore:
                    try:
                        print(f"🔄 开始处理任务 {workflow_id}")
                        # 更新任务状态为运行中
                        ai_workflow.update_workflow_status(
                            db=local_db,
                            workflow_id=workflow_id,
                            status=TaskStatus.RUNNING,
                            current_step="开始处理",
                            progress=0
                        )

                        # NOTE(review): 所有并发任务共享同一个 local_db 会话；
                        # SQLAlchemy Session 不支持并发使用 —— 需确认 await 切换点
                        # 是否足以串行化对会话的访问。
                        async with WorkflowProcessor(local_db) as processor:
                            await processor.execute_full_pipeline(workflow_id, image_url, user_prompt)

                        print(f"✅ 任务 {workflow_id} 处理完成")

                    except Exception as e:
                        print(f"❌ 任务 {workflow_id} 处理失败: {str(e)}")
                        ai_workflow.update_workflow_status(
                            db=local_db,
                            workflow_id=workflow_id,
                            status=TaskStatus.FAILED,
                            error_message=f"任务处理失败: {str(e)}"
                        )

            # 创建所有任务并等待全部结束（单个任务异常不打断其他任务）
            tasks = [
                asyncio.create_task(process_single_task(workflow_id, image_url))
                for workflow_id, image_url in zip(workflow_ids, image_urls)
            ]
            await asyncio.gather(*tasks, return_exceptions=True)

            print(f"✅ 队列批处理完成: 共处理{len(workflow_ids)}个任务")

        except Exception as e:
            print(f"❌ 队列批处理失败: {str(e)}")
            # 尽力把未完结的任务标记为失败；状态更新本身出错不掩盖原始异常
            for workflow_id in workflow_ids:
                try:
                    workflow = ai_workflow.get_workflow(db=local_db, workflow_id=workflow_id)
                    if workflow and workflow.status not in [TaskStatus.COMPLETED, TaskStatus.FAILED]:
                        ai_workflow.update_workflow_status(
                            db=local_db,
                            workflow_id=workflow_id,
                            status=TaskStatus.FAILED,
                            error_message=f"队列批处理失败: {str(e)}"
                        )
                except Exception:
                    # 修复：原先的裸 except 会吞掉 KeyboardInterrupt/SystemExit
                    pass
            raise
        finally:
            local_db.close()

    # 在已有事件循环中创建后台任务
    try:
        loop = asyncio.get_running_loop()
        loop.create_task(async_queue_batch_process())
    except RuntimeError:
        # 如果当前线程没有运行中的事件循环，则放到新线程中跑
        threading.Thread(target=lambda: asyncio.run(async_queue_batch_process()), daemon=True).start()

def process_batch_queue_pipeline(workflow_id: int, image_urls: List[str], user_prompt: str, variation_count: int, batch_size: int, max_concurrent_batches: int, db: Session):
    """执行分批队列处理的AI工作流管道

    Delegates to the batch queue manager to process the image list in batches
    of `batch_size` with at most `max_concurrent_batches` in flight. The `db`
    argument is not used directly; the coroutine opens its own session.
    """
    async def _run_batch_queue():
        session = SessionLocal()
        try:
            print(f"🚀 启动分批队列处理: {len(image_urls)}张图片，每批{batch_size}张，最大并发{max_concurrent_batches}批")

            # 获取批量队列管理器
            manager = get_batch_queue_manager(
                max_concurrent_workflows=max_concurrent_batches,  # 注意：这里将批次并发改为工作流并发
                batch_size=batch_size
            )

            # 执行分批队列处理
            outcome = await manager.process_large_batch(
                workflow_id=workflow_id,
                image_urls=image_urls,
                user_prompt=user_prompt,
                variation_count=variation_count,
                db=session
            )

            print(f"✅ 分批队列处理完成: workflow_id={workflow_id}")
            return outcome

        except Exception as exc:
            print(f"❌ 分批队列处理失败: {str(exc)}")
            # 标记工作流为失败后重新抛出
            ai_workflow.update_workflow_status(
                db=session,
                workflow_id=workflow_id,
                status=TaskStatus.FAILED,
                error_message=f"分批队列处理失败: {str(exc)}"
            )
            raise
        finally:
            session.close()

    # 有运行中的事件循环就直接挂任务；否则放到独立线程里执行
    try:
        asyncio.get_running_loop().create_task(_run_batch_queue())
    except RuntimeError:
        threading.Thread(target=asyncio.run, args=(_run_batch_queue(),), daemon=True).start()


def _compute_expected_outputs(image_count: int, variation_count: int) -> dict:
    """根据图片数量和变体数量计算各类输出的预期数量。"""
    prompt_count = 10 * image_count          # 每张图片10个提示词
    mj_count = prompt_count                  # 每个提示词对应一张MJ图
    split_count = mj_count * 4               # 每张MJ图切分为4张
    flux_count = split_count * variation_count
    return {
        "optimized_prompts": prompt_count,
        "mj_images": mj_count,
        "split_images": split_count,
        "flux_variations": flux_count,
        "product_info_items": flux_count,
        "detail_pages": flux_count
    }


def _count_info_entries(value) -> int:
    """统计 product_info / detail_pages 类字段的条目数。

    list 按长度计；含至少一个真值的 dict 计为 1；其余情况计 0。
    """
    if isinstance(value, list):
        return len(value)
    if isinstance(value, dict) and any(v for v in value.values() if v):
        return 1
    return 0


def _count_actual_output(output_type: str, result_data: dict, image_count: int) -> int:
    """统计某类输出的实际完成数量。

    优先读取 result_data 的全局字段（增量保存的结果）；
    全局字段为空时再遍历 processed_images 逐图累加。
    """
    processed_images = result_data.get("processed_images", [])

    if output_type == "optimized_prompts":
        if result_data.get("optimized_prompt"):
            return 10 * image_count  # 每张图片10个提示词
        return sum(10 for img in processed_images if img.get("optimized_prompt", ""))

    if output_type in ("mj_images", "split_images", "flux_variations"):
        # mj_images 的全局/逐图字段名均为 mj_image_urls，其余与类型同名
        field = "mj_image_urls" if output_type == "mj_images" else output_type
        count = len(result_data.get(field, []))
        if count == 0 and processed_images:
            count = sum(len(img.get(field, [])) for img in processed_images)
        return count

    if output_type in ("product_info_items", "detail_pages"):
        field = "product_info" if output_type == "product_info_items" else "detail_pages"
        global_value = result_data.get(field, [])
        if global_value:
            return _count_info_entries(global_value)
        return sum(_count_info_entries(img.get(field)) for img in processed_images)

    return 0


@router.get("/completion-details/{workflow_id}")
def get_workflow_completion_details(
    workflow_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """获取工作流任务的内部完成详情

    返回每类输出的 "实际/预期" 完成度摘要；当 result_data 缓存字段为空时，
    从 images 表重建图片统计并回写。
    """
    workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
    if not workflow:
        raise HTTPException(status_code=404, detail="工作流任务未找到")

    # 🔧 修复：补充所有权校验，防止越权查询他人工作流（与批量接口保持一致）
    if workflow.user_id != current_user.id:
        raise HTTPException(status_code=403, detail="无权访问该工作流任务")

    # 首先获取 image_count，确保在整个函数中都可以使用
    input_data = workflow.input_data or {}
    variation_count = input_data.get('variation_count', 1)

    # 从input_data获取image_task_id，然后查询image_count
    image_task_id = input_data.get('image_task_id')
    image_count = 1  # 默认值

    if image_task_id:
        # 从image_tasks表获取image_count
        task = image_task.get_task(db=db, task_id=image_task_id, user_id=workflow.user_id)
        if task and task.image_count:
            image_count = task.image_count
    elif input_data.get("image_urls"):
        # 兼容旧的image_urls方式
        image_count = len(input_data.get("image_urls", []))

    # 获取预期输出配置；批量任务强制按实际 image_count 重新计算
    expected_outputs = workflow.expected_outputs
    if input_data.get("batch_package_id") or input_data.get("image_urls") or image_task_id:
        # 批量处理任务：根据image_count计算
        expected_outputs = _compute_expected_outputs(image_count, variation_count)
    elif not expected_outputs:
        # 单图处理任务（原逻辑）
        expected_outputs = _compute_expected_outputs(1, variation_count)

    result_data = workflow.result_data or {}

    # 如果缓存字段为空，从images表中重新获取并回填
    total_mj_images = len(result_data.get("mj_image_urls", []))
    total_split_images = len(result_data.get("split_images", []))
    total_flux_variations = len(result_data.get("flux_variations", []))

    if total_mj_images == 0 or total_split_images == 0 or total_flux_variations == 0:
        print(f"🔍 completion-details接口：开始从数据库重新获取工作流 {workflow_id} 的图片数据")
        print(f"🔍 当前统计: MJ={total_mj_images}, Split={total_split_images}, Flux={total_flux_variations}")

        # 获取该工作流的所有图片
        workflow_images = _fetch_all_workflow_images(db=db, workflow_id=workflow_id)
        print(f"🔍 从数据库查询到 {len(workflow_images)} 张图片")

        # 按 tencent_response.source 重新归类图片
        mj_images = []
        split_images = []
        flux_variations = []

        for img in workflow_images:
            try:
                if img.tencent_response:
                    tencent_data = json.loads(img.tencent_response) if isinstance(img.tencent_response, str) else img.tencent_response
                    source = tencent_data.get('source', '')

                    if source == 'mj_generation':
                        mj_images.append(img.cos_url)
                    elif source == 'mj_split':
                        split_images.append({
                            'id': img.id,
                            'entity_id': img.entity_id,
                            'url': img.cos_url,
                            'file_name': img.pic_name,
                            'split_index': tencent_data.get('split_index', 0)
                        })
                    elif source == 'flux_variation':
                        flux_variations.append({
                            'id': img.id,
                            'entity_id': img.entity_id,
                            'url': img.cos_url,
                            'file_name': img.pic_name,
                            'source_image_index': tencent_data.get('source_image_index', 0),
                            'variation_index': tencent_data.get('variation_index', 0)
                        })
            except Exception as e:
                print(f"❌ 解析图片 {img.id} 的tencent_response失败: {str(e)}")
                continue

        print(f"🔍 重新统计结果: MJ={len(mj_images)}, Split={len(split_images)}, Flux={len(flux_variations)}")

        # 更新result_data（仅覆盖重建出非空结果的字段）
        if mj_images:
            result_data["mj_image_urls"] = mj_images
        if split_images:
            result_data["split_images"] = split_images
        if flux_variations:
            result_data["flux_variations"] = flux_variations

        # 保存更新后的result_data
        if mj_images or split_images or flux_variations:
            ai_workflow.update_workflow_result(
                db=db, workflow_id=workflow_id, result_data=result_data
            )
            print(f"💾 已更新工作流 {workflow_id} 的result_data")

    # 计算各项完成情况
    completion_details = {}
    total_expected = 0
    total_actual = 0

    for output_type, expected_count in expected_outputs.items():
        actual_count = _count_actual_output(output_type, result_data, image_count)

        print(f"  actual_count: {actual_count}")
        print(f"  completion: {actual_count}/{expected_count}")

        # 简化格式：只显示 "已完成/总数"
        completion_details[output_type] = f"{actual_count}/{expected_count}"

        total_expected += expected_count
        total_actual += actual_count

    completion_rate = (total_actual / total_expected * 100) if total_expected > 0 else 0

    return {
        "workflow_id": workflow_id,
        "task_name": workflow.task_name,
        "status": workflow.status,
        "completion_details": completion_details,
        "summary": {
            "completed": total_actual,
            "total": total_expected,
            "completion_rate": round(completion_rate, 2),
            "is_complete": total_actual >= total_expected
        }
    }


@router.post("/completion-details/batch", response_model=BatchCompletionDetailsResponse)
def get_batch_workflow_completion_details(
    request: BatchCompletionDetailsRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Batch-query the internal completion details of workflow tasks.

    For each requested workflow id this endpoint:
      * verifies the workflow exists and belongs to the current user,
      * derives the expected output counts from the workflow's input data,
      * tallies the actually produced outputs from ``result_data``,
      * emits a per-output-type ``"actual/expected"`` string plus a summary.

    Workflows that are missing, owned by another user, or that raise during
    processing are counted in ``query_summary`` and skipped — a single bad
    workflow never fails the whole batch.

    Raises:
        HTTPException: 400 if the id list is empty or longer than 50.
    """
    workflow_ids = request.workflow_ids

    # Input validation: non-empty list, bounded batch size.
    if not workflow_ids:
        raise HTTPException(status_code=400, detail="工作流ID列表不能为空")
    if len(workflow_ids) > 50:
        raise HTTPException(status_code=400, detail="一次最多查询50个工作流")

    workflows = []
    success_count = 0
    not_found_count = 0
    access_denied_count = 0
    query_errors = 0

    for workflow_id in workflow_ids:
        try:
            workflow = ai_workflow.get_workflow(db=db, workflow_id=workflow_id)
            if not workflow:
                not_found_count += 1
                continue

            # Ownership check — users may only inspect their own workflows.
            if workflow.user_id != current_user.id:
                access_denied_count += 1
                continue

            input_data = workflow.input_data or {}
            variation_count = input_data.get('variation_count', 1)
            image_count = _resolve_image_count(db, workflow, input_data)
            expected_outputs = _compute_expected_outputs(
                workflow, input_data, image_count, variation_count
            )
            result_data = workflow.result_data or {}

            # Per-output-type completion tally.
            completion_details = {}
            total_expected = 0
            total_actual = 0
            for output_type, expected_count in expected_outputs.items():
                actual_count = _count_actual_output(output_type, result_data, image_count)
                # Compact "completed/total" format.
                completion_details[output_type] = f"{actual_count}/{expected_count}"
                total_expected += expected_count
                total_actual += actual_count

            completion_rate = (total_actual / total_expected * 100) if total_expected > 0 else 0

            workflows.append({
                "workflow_id": workflow_id,
                "task_name": workflow.task_name,
                "status": workflow.status.value,
                "completion_details": completion_details,
                "summary": {
                    "completed": total_actual,
                    "total": total_expected,
                    "completion_rate": round(completion_rate, 2),
                    "is_complete": total_actual >= total_expected
                },
                "error": workflow.error_message
            })
            success_count += 1

        except Exception as e:
            # Best-effort batch semantics: record the failure and move on.
            query_errors += 1
            print(f"❌ 查询工作流 {workflow_id} 时发生错误: {str(e)}")
            continue

    return {
        "code": 200,
        "msg": f"成功查询 {success_count}/{len(workflow_ids)} 个工作流完成详情",
        "data": {
            "workflows": workflows,
            "total_count": len(workflow_ids),
            "success_count": success_count,
            "query_summary": {
                "requested": len(workflow_ids),
                "found": success_count,
                "not_found": not_found_count,
                "access_denied": access_denied_count,
                "query_errors": query_errors
            }
        }
    }


def _resolve_image_count(db, workflow, input_data) -> int:
    """Return the number of source images for *workflow* (default 1)."""
    image_task_id = input_data.get('image_task_id')
    if image_task_id:
        # Preferred source: the image_count stored on the linked image task.
        task = image_task.get_task(db=db, task_id=image_task_id, user_id=workflow.user_id)
        if task and task.image_count:
            return task.image_count
        return 1
    if input_data.get("image_urls"):
        # Backwards compatibility with the old image_urls payload.
        return len(input_data.get("image_urls", []))
    if input_data.get("image_path"):
        # Newer image_path payload.
        return len(input_data.get("image_path", []))
    return 1


def _compute_expected_outputs(workflow, input_data, image_count, variation_count):
    """Return the expected per-output-type counts for a workflow.

    Batch-style workflows (anything referencing a batch package, image urls,
    an image task id, or image paths) always have their expectations
    recomputed from ``image_count``. Otherwise the workflow's stored
    ``expected_outputs`` is reused when present, with a single-image default
    as the fallback.
    """
    is_batch = bool(
        input_data.get("batch_package_id")
        or input_data.get("image_urls")
        or input_data.get("image_task_id")
        or input_data.get("image_path")
    )
    if is_batch:
        prompt_count = 10 * image_count
    elif workflow.expected_outputs:
        return workflow.expected_outputs
    else:
        prompt_count = 10

    # Pipeline fan-out: 10 prompts per image -> 1 MJ image per prompt ->
    # 4 split images per MJ image -> variation_count flux variants per split.
    mj_count = prompt_count
    split_count = mj_count * 4
    flux_count = split_count * variation_count
    return {
        "optimized_prompts": prompt_count,
        "mj_images": mj_count,
        "split_images": split_count,
        "flux_variations": flux_count,
        "product_info_items": flux_count,
        "detail_pages": flux_count,
    }


def _count_structured_items(value) -> int:
    """Count a list-or-dict result: list length, 1 for a dict with any truthy value, else 0."""
    if isinstance(value, list):
        return len(value)
    if isinstance(value, dict) and any(v for v in value.values() if v):
        return 1
    return 0


def _count_actual_output(output_type, result_data, image_count) -> int:
    """Tally the produced outputs of one type from a workflow's result data.

    Global (incrementally saved) fields take precedence; per-image entries in
    ``processed_images`` are only consulted when the global field is empty.
    Unknown output types count as 0, matching the original fall-through.
    """
    processed_images = result_data.get("processed_images", [])

    if output_type == "optimized_prompts":
        if result_data.get("optimized_prompt"):
            # One global prompt stands in for all 10-per-image prompts.
            return 10 * image_count
        return sum(10 for img in processed_images if img.get("optimized_prompt", ""))

    # Plain list-valued outputs share one counting strategy.
    list_keys = {
        "mj_images": "mj_image_urls",
        "split_images": "split_images",
        "flux_variations": "flux_variations",
    }
    if output_type in list_keys:
        key = list_keys[output_type]
        global_items = result_data.get(key, [])
        count = len(global_items) if global_items else 0
        if count == 0 and processed_images:
            count = sum(len(img.get(key, []) or []) for img in processed_images)
        return count

    # List-or-dict valued outputs (product info / detail pages).
    dict_keys = {
        "product_info_items": "product_info",
        "detail_pages": "detail_pages",
    }
    if output_type in dict_keys:
        key = dict_keys[output_type]
        global_value = result_data.get(key, [])
        if global_value:
            return _count_structured_items(global_value)
        total = 0
        for img in processed_images:
            value = img.get(key)
            if value:
                total += _count_structured_items(value)
        return total

    return 0