# Standard library
import asyncio
import json
import logging
import os
import re
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
from enum import Enum
from queue import Empty, Full, Queue
from typing import Dict, Any, Optional, List

# Third-party
import psutil
import requests
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from qwen_agent.agents import Assistant

# Configure logging: mirror every record to logs/client.log and to the console.
log_directory = "logs"
if not os.path.exists(log_directory):
    os.makedirs(log_directory)
log_file_path = os.path.join(log_directory, "client.log")

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(log_file_path, encoding='utf-8'),  # persistent UTF-8 log file
        logging.StreamHandler()  # console output
    ]
)
logger = logging.getLogger(__name__)


def _join_cells(cells):
    """Join the truthy entries of a table-header row into one space-separated string."""
    return ' '.join(str(c) for c in cells if c)


def _type_specific_keywords(segment, segment_type, segment_class):
    """Return keyword fragments determined by the segment's type/class combination."""
    keywords = []

    # 1. Plain paragraph text (type: text, class is not a special value)
    if segment_type == 'text' and segment_class not in ['comment', 'textbox']:
        val = segment.get('val', '')
        if val:
            keywords.append(str(val))

    # 2. Annotation text (type: text, class: comment)
    elif segment_type == 'text' and segment_class == 'comment':
        val = segment.get('val', '')
        if val:
            keywords.append(f"注释内容: {str(val)}")

    # 3. Single-header table (type: table, class: one)
    elif segment_type == 'table' and segment_class == 'one':
        name = segment.get('name', '')
        if name:
            keywords.append(f"表格: {str(name)}")
        thead = segment.get('thead', [])
        # Only the first header row is used for the single-header case.
        if thead and isinstance(thead, list) and len(thead) > 0:
            if isinstance(thead[0], list):
                keywords.append(f"表头: {_join_cells(thead[0])}")

    # 4. Multi-group-header table (type: table, class: more)
    elif segment_type == 'table' and segment_class == 'more':
        name = segment.get('name', '')
        if name:
            keywords.append(f"多表头表格: {str(name)}")
        thead = segment.get('thead', [])
        if thead and isinstance(thead, list):
            for i, header_row in enumerate(thead):
                if isinstance(header_row, list):
                    keywords.append(f"表头{i+1}: {_join_cells(header_row)}")

    # 5. Special image with hover text (type: text, class: textbox)
    elif segment_type == 'text' and segment_class == 'textbox':
        name = segment.get('name', '')
        val = segment.get('val', '')
        if name:
            keywords.append(f"图片: {str(name)}")
        if val:
            keywords.append(f"悬浮文字: {str(val)}")

    # 6. Floating image with text box (type: floatImg)
    elif segment_type == 'floatImg':
        name = segment.get('name', '')
        if name:
            keywords.append(f"悬浮图片: {str(name)}")
        textbox = segment.get('textBox', [])
        if textbox and isinstance(textbox, list):
            text_content = ' '.join(str(text) for text in textbox if text)
            keywords.append(f"文本框内容: {text_content}")

    # 7. Coded list paragraph (type: ul)
    elif segment_type == 'ul':
        name = segment.get('name', '')
        if name:
            keywords.append(f"列表标题: {str(name)}")
        lis = segment.get('lis', [])
        if lis and isinstance(lis, list):
            list_content = ' '.join(str(item) for item in lis if item)
            keywords.append(f"列表内容: {list_content}")

    # 8. Generic table (type: table, any other class): all header rows,
    #    filtering blank-after-strip cells as well.
    elif segment_type == 'table':
        name = segment.get('name', '')
        if name:
            keywords.append(f"表格: {str(name)}")
        thead = segment.get('thead', [])
        if thead and isinstance(thead, list):
            all_headers = []
            for header_row in thead:
                if isinstance(header_row, list):
                    headers = ' '.join(str(h) for h in header_row if h and str(h).strip())
                    if headers:
                        all_headers.append(headers)
            if all_headers:
                keywords.append(f"表头: {' '.join(all_headers)}")

    # 9. Fallback for any other type: name, then val, then the bare type.
    else:
        name = segment.get('name', '')
        if name:
            keywords.append(f"名称: {str(name)}")
        val = segment.get('val', '')
        if val:
            keywords.append(f"内容: {str(val)}")
        if not keywords and segment_type:
            keywords.append(f"{segment_type} 数据")

    return keywords


def _metadata_keywords(segment):
    """Return keyword fragments for dataType and location metadata (shared by all kinds)."""
    keywords = []

    data_type = segment.get('dataType', '')
    if data_type:
        data_type_desc = "客观数据" if data_type == "objData" else "调研数据" if data_type == "dyData" else data_type
        keywords.append(f"数据类型: {data_type_desc}")

    current_location = segment.get('current_location', {})
    if current_location and isinstance(current_location, dict):
        location_parts = [
            str(current_location[key])
            for key in ('chapter', 'section', 'subsection', 'subsubsection')
            if current_location.get(key)
        ]
        if location_parts:
            keywords.append(f"位置: {' '.join(location_parts)}")

    return keywords


def extract_keywords_from_segment(segment):
    """
    Extract query keywords from a segment's content; supports multiple segment types.

    Args:
        segment: segment data dict (non-dict inputs are stringified as-is;
                 falsy non-dict inputs yield "").

    Returns:
        str: the extracted keyword text. Never empty for dict input — falls
             back to a placeholder describing the unknown type/class.
    """
    if not isinstance(segment, dict):
        return str(segment) if segment else ""

    segment_type = segment.get('type', '')
    segment_class = segment.get('class', '')
    keywords = []

    try:
        keywords.extend(_type_specific_keywords(segment, segment_type, segment_class))
        keywords.extend(_metadata_keywords(segment))
    except Exception as e:
        logger.warning(f"提取segment关键词时出错: {e}")
        # On failure fall back to the simplest extraction: val, then name, then
        # the bare type. (Extending only on success also prevents the previous
        # behavior of mixing partial results with the fallback.)
        val = segment.get('val', '')
        name = segment.get('name', '')
        if val:
            keywords.append(str(val))
        elif name:
            keywords.append(str(name))
        elif segment_type:
            keywords.append(f"{segment_type} 数据")

    # Merge and clean the collected fragments.
    final_text = ' '.join(keywords).strip()

    # Guarantee a non-empty description for downstream search.
    if not final_text:
        final_text = f"未知类型数据 (type: {segment_type}, class: {segment_class})"

    return final_text

# FastAPI application exposing the report-generation API.
app = FastAPI(
    title="MCP API Service",
    description="API service for MCP report generation and updates",
    version="1.0.0"
)

# Enable CORS for browser clients.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # should be restricted to specific domains in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/health")
async def health_check():
    """Liveness probe: report service identity and version."""
    payload = {"status": "healthy", "service": "Agent Client", "version": "2.0.0"}
    return payload

@app.on_event("startup")
async def startup_event():
    """FastAPI startup hook: run app initialization, prepare the RAG knowledge base, report readiness."""
    logger.info("📡 FastAPI应用启动事件触发")
    
    try:
        # Run the main application initialization (defined elsewhere in this module).
        startup_initialization()
        
        # Initialize the RAG knowledge base (lazy-loaded: built on first use).
        from .rag_knowledge_base import get_rag_knowledge_base
        rag_kb = get_rag_knowledge_base()
        
        if rag_kb.embeddings_file.exists():
            logger.info("✅ RAG知识库已就绪")
        else:
            logger.info("ℹ️  RAG知识库将在首次使用时生成")
        
        # Final readiness report based on how many agents made it into the pool.
        pool_size = agent_pool.qsize()
        if pool_size > 0:
            logger.info(f"🎯 服务已就绪 - Agent池: {pool_size}/{MAX_GLOBAL_LLM_CALLS} 个实例可用")
        else:
            logger.warning("⚠️  服务启动完成，但Agent池为空，可能影响性能")
            
    except Exception as e:
        logger.error(f"❌ FastAPI启动初始化失败: {e}")
        # Do not block application startup; just record the error.
@app.on_event("shutdown")
async def shutdown_event():
    """FastAPI shutdown hook: release resources via shutdown_cleanup (defined elsewhere)."""
    logger.info("FastAPI应用关闭，开始清理资源...")
    shutdown_cleanup()

class TaskStatus(str, Enum):
    """Lifecycle states of a report-generation task; str-valued so it serializes cleanly to JSON."""
    PENDING = "pending"      # queued, not yet started
    RUNNING = "running"      # currently being processed
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # finished with an error

class ReportRequest(BaseModel):
    """Request body for report generation/update: shared parameters plus the segments to process."""
    # Parameters forwarded with every segment (see process_single_segment).
    common_params: dict
    # Segment dicts to update; shapes handled in extract_keywords_from_segment.
    content_segments: list

class TaskResponse(BaseModel):
    """Immediate acknowledgement returned when a task is accepted."""
    task_id: str         # identifier for later status queries
    status: TaskStatus   # initial status of the task
    message: str         # human-readable acceptance message

class TaskStatusResponse(BaseModel):
    """Detailed status of a single task, as returned by status-query endpoints."""
    task_id: str
    status: TaskStatus
    progress: Optional[int] = None              # completion percentage, 0-100
    result: Optional[Dict[str, Any]] = None     # final payload once COMPLETED
    error: Optional[str] = None                 # error message when FAILED
    created_at: Optional[str] = None            # ISO-8601 timestamp
    completed_at: Optional[str] = None          # ISO-8601 timestamp
    estimated_completion_time: Optional[str] = None  # ISO-8601 estimate (see estimate_completion_time)
    duration_seconds: Optional[float] = None    # elapsed time once finished

class TaskSummary(BaseModel):
    """Compact per-task view used inside TaskStatistics lists."""
    task_id: str
    status: TaskStatus
    progress: Optional[int] = None
    created_at: str                       # ISO-8601 timestamp
    completed_at: Optional[str] = None    # ISO-8601 timestamp
    duration_seconds: Optional[float] = None
    estimated_completion_time: Optional[str] = None  # ISO-8601 estimate, running tasks only
    segments_count: Optional[int] = None  # number of content segments in the task

class TaskStatistics(BaseModel):
    """Aggregate view over all stored tasks (built by get_task_statistics)."""
    total_tasks: int
    pending_tasks: int
    running_tasks: int
    completed_tasks: int
    failed_tasks: int
    average_duration_seconds: Optional[float] = None          # mean over finished tasks
    estimated_queue_wait_time_seconds: Optional[float] = None # rough wait estimate
    current_running_tasks: List[TaskSummary] = []
    recent_completed_tasks: List[TaskSummary] = []            # newest first, at most 10

# Global in-memory task store, keyed by task_id, guarded by tasks_lock.
tasks_storage: Dict[str, Dict[str, Any]] = {}
tasks_lock = threading.Lock()

# Global concurrency configuration - tuned for asynchronous offline processing.
MAX_GLOBAL_LLM_CALLS = 5  # global cap on concurrent LLM calls
MAX_CONCURRENT_ASYNC_TASKS = 3  # at most 3 async tasks running simultaneously
AGENT_POOL_TIMEOUT = 600  # seconds to wait for a pooled agent; async scenarios tolerate longer waits
MAX_QUEUE_SIZE_DEFAULT = 200  # default queue size, supports large task backlogs

# Global thread pool: ensures at most MAX_GLOBAL_LLM_CALLS concurrent LLM calls.
global_executor = ThreadPoolExecutor(max_workers=MAX_GLOBAL_LLM_CALLS)

# Pool of pre-built Agent instances.
agent_pool = Queue(maxsize=MAX_GLOBAL_LLM_CALLS)

# One-time Agent-pool initialization state, guarded by its own lock.
_agent_pool_initialized = False
_agent_pool_init_lock = threading.Lock()

# Async task queue control.
async_task_queue = Queue()  # async tasks waiting to be processed
running_async_tasks = {}  # currently running async tasks {task_id: thread}
async_task_semaphore = threading.Semaphore(MAX_CONCURRENT_ASYNC_TASKS)  # bounds concurrency
async_queue_lock = threading.Lock()  # protects running_async_tasks bookkeeping

# Statistics cache used by get_task_statistics.
stats_cache = {
    'data': None,
    'last_update': 0,
    'cache_duration': 30  # cache TTL in seconds
}
stats_cache_lock = threading.Lock()

# Memory monitoring and task-cleanup configuration.
MEMORY_WARNING_THRESHOLD = 80  # memory usage warning threshold (%)
MEMORY_CRITICAL_THRESHOLD = 90  # memory usage critical threshold (%)
TASK_COUNT_WARNING_THRESHOLD = 1000  # task count warning threshold
TASK_COUNT_CRITICAL_THRESHOLD = 2000  # task count critical threshold
TASK_CLEANUP_INTERVAL = 3600  # cleanup interval (seconds)
TASK_MAX_AGE_HOURS = 24  # max retention time for finished tasks (hours)

def invalidate_stats_cache():
    """Discard the cached task statistics so the next query recomputes them."""
    with stats_cache_lock:
        stats_cache.update(data=None, last_update=0)

def cleanup_old_tasks():
    """Remove finished tasks older than TASK_MAX_AGE_HOURS; return how many were removed."""
    now = datetime.now()

    with tasks_lock:
        expired = []
        for tid, info in tasks_storage.items():
            # Only finished tasks are eligible for cleanup.
            if info['status'] not in (TaskStatus.COMPLETED, TaskStatus.FAILED):
                continue
            try:
                finished_at = datetime.fromisoformat(info.get('completed_at', info['created_at']))
                if (now - finished_at).total_seconds() / 3600 > TASK_MAX_AGE_HOURS:
                    expired.append(tid)
            except Exception:
                # Unparsable timestamps get cleaned up as well.
                expired.append(tid)

        for tid in expired:
            del tasks_storage[tid]
        cleanup_count = len(expired)

    if cleanup_count > 0:
        logger.info(f"清理了 {cleanup_count} 个过期任务")
        invalidate_stats_cache()
    return cleanup_count

def monitor_memory_usage():
    """Sample process memory and task-count pressure; force a cleanup at critical levels."""
    task_count = len(tasks_storage)
    proc = psutil.Process()
    memory_mb = proc.memory_info().rss / 1024 / 1024
    memory_percent = proc.memory_percent()

    # Either dimension (memory or task count) can trip a level.
    is_critical = (memory_percent > MEMORY_CRITICAL_THRESHOLD
                   or task_count > TASK_COUNT_CRITICAL_THRESHOLD)
    is_warning = (memory_percent > MEMORY_WARNING_THRESHOLD
                  or task_count > TASK_COUNT_WARNING_THRESHOLD)

    if is_critical:
        logger.error(f"严重警告：内存使用率 {memory_percent:.1f}%，任务数量 {task_count}，内存使用: {memory_mb:.1f}MB")
        # Force-clean expired tasks to relieve pressure.
        cleanup_old_tasks()
    elif is_warning:
        logger.warning(f"警告：内存使用率 {memory_percent:.1f}%，任务数量 {task_count}，内存使用: {memory_mb:.1f}MB")

    level = 'critical' if is_critical else 'warning' if is_warning else 'normal'
    return {
        'task_count': task_count,
        'memory_mb': memory_mb,
        'memory_percent': memory_percent,
        'warning_level': level
    }

def periodic_cleanup():
    """Background loop: sleep for TASK_CLEANUP_INTERVAL, then clean up tasks and sample memory."""
    while True:
        try:
            # Sleep first so a freshly started service does not clean immediately.
            time.sleep(TASK_CLEANUP_INTERVAL)
            cleanup_old_tasks()
            monitor_memory_usage()
        except Exception as exc:
            logger.error(f"定期清理任务时出错: {exc}")

def calculate_duration(created_at: str, completed_at: Optional[str] = None, started_at: Optional[str] = None) -> Optional[float]:
    """
    Compute elapsed seconds for a finished task.

    Measures from started_at (when available) or created_at up to completed_at.
    Returns None when the task has not completed or a timestamp is unparsable.
    """
    if not completed_at:
        return None
    try:
        begin = datetime.fromisoformat(started_at or created_at)
        finish = datetime.fromisoformat(completed_at)
    except Exception:
        return None
    return (finish - begin).total_seconds()

def estimate_completion_time(task_data: Dict[str, Any], avg_duration: Optional[float]) -> Optional[str]:
    """
    Estimate when a task will finish, as an ISO-8601 string.

    Running tasks: extrapolate from reported progress when available, otherwise
    fall back to the historical average duration. Pending tasks: creation time
    plus the average duration. Returns None when no estimate can be made.
    """
    try:
        status = task_data['status']

        if status == TaskStatus.RUNNING and avg_duration and task_data.get('started_at'):
            begun = datetime.fromisoformat(task_data['started_at'])
            pct = task_data.get('progress', 0)
            if pct > 0:
                # Scale elapsed time by progress to project the total runtime.
                elapsed = (datetime.now() - begun).total_seconds()
                projected_total = elapsed * 100 / pct
                return (begun + timedelta(seconds=projected_total)).isoformat()
            # No progress yet: assume the historical average.
            return (begun + timedelta(seconds=avg_duration)).isoformat()

        if status == TaskStatus.PENDING and avg_duration:
            # Pending: creation time + average execution time.
            queued_at = datetime.fromisoformat(task_data['created_at'])
            return (queued_at + timedelta(seconds=avg_duration)).isoformat()

        return None
    except Exception:
        return None

def get_task_statistics() -> TaskStatistics:
    """Build aggregate task statistics, cached for stats_cache['cache_duration'] seconds."""
    current_time = time.time()
    
    # Serve from cache while it is still fresh.
    with stats_cache_lock:
        if (stats_cache['data'] is not None and 
            current_time - stats_cache['last_update'] < stats_cache['cache_duration']):
            return stats_cache['data']
    
    # Cache miss or stale: recompute under the task-store lock.
    with tasks_lock:
        if not tasks_storage:
            result = TaskStatistics(
                total_tasks=0,
                pending_tasks=0,
                running_tasks=0,
                completed_tasks=0,
                failed_tasks=0
            )
            # Refresh the cache.
            with stats_cache_lock:
                stats_cache['data'] = result
                stats_cache['last_update'] = current_time
            return result
        
        # Count tasks per status.
        status_counts = {status: 0 for status in TaskStatus}
        completed_durations = []
        current_running = []
        recent_completed = []
        
        for task_id, task_data in tasks_storage.items():
            status = task_data['status']
            status_counts[status] += 1
            
            # Durations of finished tasks feed the average below.
            if status in [TaskStatus.COMPLETED, TaskStatus.FAILED]:
                duration = calculate_duration(
                    task_data['created_at'], 
                    task_data.get('completed_at'),
                    task_data.get('started_at')
                )
                if duration is not None:
                    completed_durations.append(duration)
                
                # Collect recently finished tasks (at most 10).
                if len(recent_completed) < 10:
                    recent_completed.append(TaskSummary(
                        task_id=task_id,
                        status=status,
                        progress=task_data.get('progress'),
                        created_at=task_data['created_at'],
                        completed_at=task_data.get('completed_at'),
                        duration_seconds=duration,
                        segments_count=task_data.get('segments_count')
                    ))
            
            # Collect currently running tasks.
            elif status == TaskStatus.RUNNING:
                current_running.append(TaskSummary(
                    task_id=task_id,
                    status=status,
                    progress=task_data.get('progress'),
                    created_at=task_data['created_at'],
                    segments_count=task_data.get('segments_count')
                ))
        
        # Average duration across finished tasks.
        avg_duration = sum(completed_durations) / len(completed_durations) if completed_durations else None
        
        # Estimate queue wait time from running count and average execution time.
        queue_wait_time = None
        if avg_duration and status_counts[TaskStatus.RUNNING] > 0:
            # Rough estimate: running tasks * average duration / global concurrency cap.
            queue_wait_time = (status_counts[TaskStatus.RUNNING] * avg_duration) / MAX_GLOBAL_LLM_CALLS
        
        # Attach an estimated completion time to each running task.
        for task_summary in current_running:
            task_data = tasks_storage[task_summary.task_id]
            task_summary.estimated_completion_time = estimate_completion_time(task_data, avg_duration)
        
        # Newest finished tasks first.
        recent_completed.sort(key=lambda x: x.completed_at or '', reverse=True)
        
        result = TaskStatistics(
            total_tasks=len(tasks_storage),
            pending_tasks=status_counts[TaskStatus.PENDING],
            running_tasks=status_counts[TaskStatus.RUNNING],
            completed_tasks=status_counts[TaskStatus.COMPLETED],
            failed_tasks=status_counts[TaskStatus.FAILED],
            average_duration_seconds=avg_duration,
            estimated_queue_wait_time_seconds=queue_wait_time,
            current_running_tasks=current_running,
            recent_completed_tasks=recent_completed
        )
        
        # Refresh the cache.
        with stats_cache_lock:
            stats_cache['data'] = result
            stats_cache['last_update'] = current_time
            
        return result

def create_agent_instance():
    """
    Create a fully configured qwen-agent Assistant bound to the local MCP server.

    Checks that the MCP server responds to /health, loads the shared LLM
    configuration (falling back to an environment-based default), reads the
    system prompt from system_prompt.txt and stamps it with the current time.

    Returns:
        Assistant: a ready-to-use agent instance.

    Raises:
        Exception: if the MCP server is unreachable or agent construction fails.
    """
    # First check whether the MCP server is available.
    mcp_server_url = 'http://localhost:8000'
    try:
        # Short timeout so an unreachable server fails fast.
        response = requests.get(f"{mcp_server_url}/health", timeout=5)
        if response.status_code != 200:
            logger.warning(f"MCP服务器健康检查失败，状态码: {response.status_code}")
    except requests.exceptions.RequestException as e:
        logger.error(f"无法连接到MCP服务器 {mcp_server_url}: {e}")
        raise Exception(f"MCP服务器不可用: {e}")

    try:
        # Import the LLM configuration from the shared config module.
        import sys
        parent_dir = os.path.join(os.path.dirname(__file__), '..')
        if parent_dir not in sys.path:  # guard against unbounded sys.path growth on repeated calls
            sys.path.append(parent_dir)
        from shared.config.llm_config import llm_cfg
    except ImportError as e:
        logger.error(f"无法导入LLM配置: {e}")
        # Fallback configuration. SECURITY: the API key must come from the
        # environment; a previously hard-coded key was removed from this default.
        llm_cfg = {
            'model': 'qwen-plus',
            'model_server': 'dashscope',
            'api_key': os.getenv('DASHSCOPE_API_KEY', ''),
            'generate_cfg': {
                'temperature': 0.3,
                'top_p': 0.8
            }
        }

    # MCP tool configuration - a single unified tool server.
    tools = [{
        "mcpServers": {
            "xinjinchengmcp": {
                "url": "http://localhost:8000/sse"
            }
        }
    }]

    with open(os.path.join(os.path.dirname(__file__), 'system_prompt.txt'), 'r', encoding='utf-8') as f:
        system_prompt = f.read()

    # Append the current time so the agent can reason about "now".
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    system_prompt += f"\n\n当前时间: {current_time}"

    try:
        # Build the Agent instance.
        bot = Assistant(
            llm=llm_cfg,
            name='报告更新专家',
            description='报告更新专家专注于通过智能工具动态生成准确的报告内容，确保数据始终保持最新状态。',
            system_message=system_prompt,
            function_list=tools,
        )
        logger.debug("Agent实例创建成功")
        return bot
    except Exception as e:
        logger.error(f"创建Agent实例时发生错误: {e}")
        raise Exception(f"Agent实例创建失败: {e}")

def get_agent_from_pool():
    """
    Get an Agent instance from the pool; never bypasses the pool limit.

    Returns:
        A pooled agent instance.

    Raises:
        HTTPException: 503 when no instance becomes available within
            AGENT_POOL_TIMEOUT seconds.
    """
    try:
        # Fast path: grab an idle instance without blocking.
        return agent_pool.get_nowait()
    except Empty:
        # Pool momentarily empty - fall through to the blocking wait below.
        # (Catching queue.Empty specifically instead of a bare except, which
        # also swallowed KeyboardInterrupt and unrelated errors.)
        pass

    try:
        logger.warning("Agent池暂时为空，等待可用实例...")
        # Blocking wait: LLM generation is slow, so allow a generous timeout
        # rather than creating extra instances beyond the pool limit.
        agent = agent_pool.get(timeout=AGENT_POOL_TIMEOUT)
        logger.info("成功获取到Agent实例")
        return agent
    except Empty:
        # Timed out: the system is overloaded or tasks are running abnormally long.
        logger.error(f"Agent池获取超时（{AGENT_POOL_TIMEOUT}秒），系统负载过高")
        raise HTTPException(
            status_code=503, 
            detail=f"服务暂时不可用，系统负载过高，请稍后重试。当前等待时间超过{AGENT_POOL_TIMEOUT}秒"
        )

def return_agent_to_pool(agent):
    """Return an Agent instance to the pool; silently drop it if the pool is already full."""
    try:
        agent_pool.put_nowait(agent)
    except Full:
        # Pool already at capacity - discard the instance.
        # (Catching queue.Full specifically instead of a bare except, which
        # also swallowed unrelated errors.)
        pass

def init_agent_pool():
    """Thread-safe one-time initialization of the Agent pool (up to MAX_GLOBAL_LLM_CALLS instances)."""
    global _agent_pool_initialized
    
    with _agent_pool_init_lock:
        if _agent_pool_initialized:
            logger.debug("Agent池已经初始化，跳过重复初始化")
            return
        
        logger.info("🚀 开始初始化Agent实例池...")
        
        successful_agents = 0
        failed_agents = 0
        
        for i in range(MAX_GLOBAL_LLM_CALLS):
            try:
                logger.debug(f"创建Agent实例 {i+1}/{MAX_GLOBAL_LLM_CALLS}...")
                agent = create_agent_instance()
                agent_pool.put_nowait(agent)
                successful_agents += 1
                logger.debug(f"Agent实例 {i+1} 创建成功")
            except Exception as e:
                failed_agents += 1
                logger.error(f"创建Agent实例 {i+1} 失败: {e}")
        
        # Marked initialized even on partial failure: no automatic retry happens here.
        _agent_pool_initialized = True
        
        if successful_agents > 0:
            logger.info(f"✅ Agent池初始化完成: {successful_agents} 个成功, {failed_agents} 个失败")
        else:
            logger.error(f"❌ Agent池初始化失败: 没有成功创建任何Agent实例")
            raise Exception("Agent池初始化完全失败")

# Backward compatibility
def init_agent_service():
    """Legacy entry point kept for backward compatibility; delegates to the agent pool."""
    return get_agent_from_pool()

def process_single_segment(segment, common_params, segment_index):
    """
    Process one segment through a pooled Agent and return the updated segment.

    Args:
        segment: the segment dict to update.
        common_params: request-wide parameters forwarded with the segment.
        segment_index: position of the segment in the original request.

    Returns:
        tuple: (segment_index, processed segment). On any failure the original
        segment is returned unchanged so the caller preserves ordering.
    """
    bot = None
    try:
        bot = get_agent_from_pool()
        
        # Use RAG to retrieve relevant function documentation for this segment.
        rag_context = ""
        try:
            from .rag_knowledge_base import get_rag_knowledge_base
            rag_kb = get_rag_knowledge_base()
            
            # Extract query keywords from the segment content.
            segment_text = extract_keywords_from_segment(segment)
            
            if segment_text.strip():
                rag_context = rag_kb.get_functions_context(segment_text, top_k=15)
                logger.info(f"Generated RAG context for segment {segment_index}: {len(rag_context)} characters")
            else:
                rag_context = "未能从segment中提取有效查询信息。"
                
        except Exception as e:
            # RAG failure is non-fatal - continue with a placeholder context.
            logger.warning(f"RAG搜索失败，继续使用默认处理: {e}")
            rag_context = "RAG服务暂时不可用，请根据segment内容直接选择合适的函数。"
        
        # Construct a new request for each segment with RAG context
        segment_request_data = {
            "common_params": common_params,
            "content_segments": [segment]
        }
        
        # Prepend the RAG context to the user message.
        user_message = f"""以下是可用的MCP函数信息：

{rag_context}

请处理以下segment：

{json.dumps(segment_request_data, ensure_ascii=False)}"""
        
        messages = [{'role': 'user', 'content': [{'text': user_message}]}]
        
        # Drive the Agent stream manually so we can "short-circuit" early.
        response_stream = bot.run(messages)
        
        final_response_list = []
        for response in response_stream:
            # Each response is a list of messages.
            if not response:
                continue

            final_response_list = response

            # Early interception inside the message stream: if the newest
            # message is the function result from 'process_and_update_table',
            # return it directly without waiting for the LLM to restate it.
            latest_message = final_response_list[-1]
            if latest_message.get('role') == 'function' and 'xinjinchengmcp-process_and_update_table' in latest_message.get('name', ''):
                logger.info("提前拦截到 'process_and_update_table' 的结果，执行终极“短路”优化。")
                try:
                    updated_segment_str = latest_message.get('content', '{}')
                    updated_segment = json.loads(updated_segment_str)
                    
                    # Strip the internal current_location field before returning.
                    if isinstance(updated_segment, dict) and 'current_location' in updated_segment:
                        del updated_segment['current_location']
                    
                    logger.info(f"终极短路成功，直接返回工具处理后的 segment {segment_index}")
                    return segment_index, updated_segment
                except json.JSONDecodeError as e:
                    logger.error(f"终极短路优化失败：解析工具返回内容时出错: {e}。内容: {latest_message.get('content', '')[:200]}...")
                    # On parse failure, do not abort - let the LLM keep processing.
                except Exception as e:
                    logger.error(f"终极短路优化时发生未知错误: {e}")
                    # Degrade gracefully to the full flow below.
        
        # No short-circuit triggered: fall back to the full original flow.
        logger.info(f"Agent response for segment {segment_index}: {final_response_list}")
        
        # Take the last assistant message from the final response list.
        content = ""
        for msg in reversed(final_response_list):
            if msg.get('role') == 'assistant' and msg.get('content'):
                content = msg['content']
                break
        
        if not content:
            logger.error(f"未能从最终响应中找到有效的 assistant content。响应: {final_response_list}")
            return segment_index, segment

        # Extract json from markdown code block
        match = re.search(r"```json\n(.*?)\n```", content, re.DOTALL)
        if match:
            json_text = match.group(1).strip()
        else:
            json_text = content

        # Robust JSON parsing: try progressively more permissive strategies.
        content_json = None
        parse_attempts = [
            lambda: json.loads(json_text),  # raw parse
            lambda: json.loads(json_text.split('\n')[0]),  # first line only
            lambda: json.loads(re.sub(r'\n.*$', '', json_text, flags=re.MULTILINE)),  # strip trailing lines
            lambda: json.loads(re.search(r'\{.*\}', json_text, re.DOTALL).group()) if re.search(r'\{.*\}', json_text, re.DOTALL) else None,  # extract the JSON object
        ]
        
        for i, parse_func in enumerate(parse_attempts):
            try:
                content_json = parse_func()
                if content_json is not None:
                    if i > 0:
                        logger.info(f"JSON解析成功（尝试 {i+1}）")
                    break
            except json.JSONDecodeError as e:
                if i == 0:
                    logger.warning(f"标准JSON解析失败: {e}")
                continue
            except Exception as e:
                logger.debug(f"解析尝试 {i+1} 失败: {e}")
                continue
        
        if content_json is None:
            logger.error(f"所有JSON解析尝试都失败，原始内容: {json_text[:200]}...")
            # Parsing failed entirely - fall back to the original segment.
            return segment_index, segment
        if 'content_segments' in content_json and content_json['content_segments']:
            # Take the first content_segment.
            segment_data = content_json['content_segments'][0]
            
            # Strip the internal current_location field before returning.
            if isinstance(segment_data, dict) and 'current_location' in segment_data:
                segment_data = segment_data.copy()  # copy to avoid mutating the parsed data
                del segment_data['current_location']
            
            return segment_index, segment_data
        else:
            return segment_index, segment
            
    except Exception as e:
        logger.error(f"Error processing segment {segment_index}: {e}")
        return segment_index, segment
    finally:
        # Always return the Agent to the pool.
        if bot:
            return_agent_to_pool(bot)

def process_segments_parallel(request: ReportRequest, task_id: str):
    """
    Process all segments of a request in parallel, recording outcome in tasks_storage.

    Concurrency is bounded by the shared global_executor (MAX_GLOBAL_LLM_CALLS
    workers). Task status/progress is updated as results arrive, and the
    async-task concurrency slot is always released in the finally block.
    """
    try:
        with tasks_lock:
            tasks_storage[task_id]['status'] = TaskStatus.RUNNING
            tasks_storage[task_id]['progress'] = 0
            tasks_storage[task_id]['started_at'] = datetime.now().isoformat()
        invalidate_stats_cache()
        
        updated_segments = [None] * len(request.content_segments)
        
        # Submit everything to the global executor so at most
        # MAX_GLOBAL_LLM_CALLS LLM calls run concurrently across all tasks.
        future_to_index = {
            global_executor.submit(process_single_segment, segment, request.common_params, i): i 
            for i, segment in enumerate(request.content_segments)
        }
        
        completed_count = 0
        # NOTE(review): futures are awaited in submission order rather than
        # completion order, so reported progress may lag actual completion;
        # also completed_count is not advanced on the error path below.
        for future in future_to_index:
            try:
                segment_index, processed_segment = future.result()
                updated_segments[segment_index] = processed_segment
                completed_count += 1
                
                # Update progress.
                progress = int((completed_count / len(request.content_segments)) * 100)
                with tasks_lock:
                    tasks_storage[task_id]['progress'] = progress
                invalidate_stats_cache()
                    
            except Exception as e:
                # On failure keep the original, unprocessed segment in place.
                logger.error(f"Error in future result: {e}", exc_info=True)
                segment_index = future_to_index[future]
                updated_segments[segment_index] = request.content_segments[segment_index]
        
        final_result = {"content_segments": updated_segments}
        
        with tasks_lock:
            tasks_storage[task_id]['status'] = TaskStatus.COMPLETED
            tasks_storage[task_id]['result'] = final_result
            tasks_storage[task_id]['completed_at'] = datetime.now().isoformat()
            tasks_storage[task_id]['progress'] = 100
        invalidate_stats_cache()
            
        logger.info(f"Task {task_id} completed successfully")
        
    except Exception as e:
        logger.error(f"Error in parallel processing for task {task_id}: {e}", exc_info=True)
        with tasks_lock:
            tasks_storage[task_id]['status'] = TaskStatus.FAILED
            tasks_storage[task_id]['error'] = str(e)
            tasks_storage[task_id]['completed_at'] = datetime.now().isoformat()
        invalidate_stats_cache()
    finally:
        # When the async task finishes, drop it from the running set and free a slot.
        with async_queue_lock:
            if task_id in running_async_tasks:
                del running_async_tasks[task_id]
                async_task_semaphore.release()
                logger.info(f"异步任务 {task_id} 完成，释放并发槽位。当前运行中任务数: {len(running_async_tasks)}")

def async_task_worker():
    """Worker loop for async tasks: pull items off the queue and launch them.

    Runs until a ``None`` sentinel is received. Concurrency is bounded by
    ``async_task_semaphore`` (released by the processing thread on
    completion); each accepted task runs ``process_segments_parallel`` in
    its own daemon thread.

    Fixes over the previous version:
    - ``task_done()`` is now balanced against every ``get()`` via a
      ``finally`` block, including the shutdown sentinel, so a potential
      ``queue.join()`` can never hang.
    - The running-task count is captured while holding ``async_queue_lock``
      instead of being read unlocked for the log message.
    """
    while True:
        try:
            task_item = async_task_queue.get()
            try:
                if task_item is None:  # shutdown sentinel
                    break

                request, task_id = task_item
                logger.info(f"开始处理队列中的异步任务: {task_id}")

                # Block until a concurrency slot is available.
                async_task_semaphore.acquire()

                try:
                    # Thread that performs the actual segment processing.
                    thread = threading.Thread(
                        target=process_segments_parallel, args=(request, task_id)
                    )
                    thread.daemon = True

                    # Register before starting so the completion path
                    # (in process_segments_parallel's finally) can find it.
                    with async_queue_lock:
                        running_async_tasks[task_id] = thread
                        running_count = len(running_async_tasks)

                    thread.start()
                    logger.info(f"异步任务 {task_id} 已启动，当前运行中任务数: {running_count}")

                except Exception as e:
                    logger.error(f"启动异步任务 {task_id} 失败: {e}")
                    # Startup failed: release the slot and undo registration.
                    async_task_semaphore.release()
                    with async_queue_lock:
                        running_async_tasks.pop(task_id, None)
                    # Mark the task as failed so status polling reflects it.
                    with tasks_lock:
                        if task_id in tasks_storage:
                            tasks_storage[task_id]['status'] = TaskStatus.FAILED
                            tasks_storage[task_id]['error'] = f"任务启动失败: {str(e)}"
                            tasks_storage[task_id]['completed_at'] = datetime.now().isoformat()
                    invalidate_stats_cache()
            finally:
                # Balance get() unconditionally (success, failure, sentinel).
                async_task_queue.task_done()

        except Exception as e:
            logger.error(f"异步任务工作线程出错: {e}")
            time.sleep(1)  # avoid a tight error loop

def get_async_queue_status():
    """Return a snapshot of the async task queue state.

    The snapshot includes concurrency limits, running/waiting counts,
    configured queue sizes, a coarse health label and a utilization
    percentage. The queue size is re-read from the environment on every
    call so ``MAX_ASYNC_QUEUE_SIZE`` can be tuned without a restart.

    Returns:
        dict: queue metrics consumed by the HTTP endpoints and by the
        backpressure logic in create_async_task.
    """
    with async_queue_lock:
        running_count = len(running_async_tasks)
        waiting_count = async_task_queue.qsize()
        available_slots = MAX_CONCURRENT_ASYNC_TASKS - running_count

        # Re-read so the configured size can change at runtime.
        current_queue_size = int(os.getenv("MAX_ASYNC_QUEUE_SIZE", str(MAX_QUEUE_SIZE_DEFAULT)))
        critical_queue_size = current_queue_size + 100

        # Coarse health classification based on backlog depth.
        if waiting_count >= critical_queue_size:
            queue_health = "critical"
        elif waiting_count >= current_queue_size:
            queue_health = "warning"
        elif waiting_count >= current_queue_size * 0.8:
            queue_health = "busy"
        else:
            queue_health = "normal"

        # Bug fix: a misconfigured queue size of 0 previously raised
        # ZeroDivisionError here, breaking every caller of this function.
        if current_queue_size > 0:
            utilization = round((waiting_count / current_queue_size) * 100, 1)
        else:
            utilization = 0.0

        return {
            'max_concurrent_tasks': MAX_CONCURRENT_ASYNC_TASKS,
            'running_tasks': running_count,
            'waiting_tasks': waiting_count,
            'available_slots': available_slots,
            'running_task_ids': list(running_async_tasks.keys()),
            'max_queue_size': current_queue_size,
            'critical_queue_size': critical_queue_size,
            'queue_health': queue_health,
            'queue_utilization_percent': utilization
        }

@app.post("/api/reports/full-update")
def run_query(request: ReportRequest):
    """Synchronous processing endpoint (kept for backward compatibility).

    Registers a throwaway task record, runs the shared parallel pipeline
    inline (blocking), then returns the result or raises HTTP 500 with the
    recorded error. The temporary record is always removed afterwards.
    """
    logger.debug(f"Received synchronous request: {request.model_dump_json()}")
    logger.info("Processing synchronous report update request")

    # Temporary task id so the shared pipeline can track progress.
    task_id = str(uuid.uuid4())
    initial_record = {
        'status': TaskStatus.PENDING,
        'progress': 0,
        'result': None,
        'error': None,
        'created_at': datetime.now().isoformat(),
        'completed_at': None,
        'started_at': None,
        'segments_count': len(request.content_segments),
    }
    with tasks_lock:
        tasks_storage[task_id] = initial_record
    invalidate_stats_cache()

    # Execute the parallel pipeline inline.
    process_segments_parallel(request, task_id)

    # Collect the outcome and drop the temporary record in one pass.
    with tasks_lock:
        task_data = tasks_storage.pop(task_id)
        invalidate_stats_cache()

    if task_data['status'] == TaskStatus.COMPLETED:
        logger.info("Synchronous request completed successfully")
        return task_data['result']

    error_msg = task_data.get('error', 'Unknown error')
    raise HTTPException(status_code=500, detail=f"Processing failed: {error_msg}")

@app.post("/api/reports/async-update", response_model=TaskResponse)
def create_async_task(request: ReportRequest):
    """Create an asynchronous processing task and enqueue it.

    Two-tier backpressure: above MAX_QUEUE_SIZE the task is still accepted
    (with a warning); at CRITICAL_QUEUE_SIZE (MAX_QUEUE_SIZE + 100) the
    request is rejected with HTTP 429 and a suggested retry delay.

    Returns:
        TaskResponse: the new task id plus a human-readable wait estimate.

    Raises:
        HTTPException: 429 when the queue has reached the critical size.
    """
    logger.debug(f"Received async request: {request.model_dump_json()}")
    logger.info(f"Created async task with {len(request.content_segments)} segments")
    
    # Snapshot queue state for the backpressure decision below.
    queue_status = get_async_queue_status()
    
    # Standard queue cap; configurable via env var, with a default sized
    # for offline batch scenarios that queue many tasks.
    MAX_QUEUE_SIZE = int(os.getenv("MAX_ASYNC_QUEUE_SIZE", str(MAX_QUEUE_SIZE_DEFAULT)))
    
    # Lenient policy: only reject in the extreme case.
    CRITICAL_QUEUE_SIZE = MAX_QUEUE_SIZE + 100  # headroom beyond the standard cap
    
    if queue_status['waiting_tasks'] >= CRITICAL_QUEUE_SIZE:
        logger.error(f"任务队列达到临界值 ({queue_status['waiting_tasks']}/{CRITICAL_QUEUE_SIZE})，拒绝新任务")
        
        # Rough retry estimate, capped at 2 hours.
        estimated_wait_hours = queue_status['waiting_tasks'] * 3 / 60  # assumes ~3 minutes per task
        retry_after_seconds = min(estimated_wait_hours * 3600, 7200)  # suggest retrying within 2 hours
        
        raise HTTPException(
            status_code=429, 
            detail={
                "message": "系统负载极高，队列已达临界值，请稍后重试",
                "queue_status": queue_status,
                "retry_after_seconds": retry_after_seconds,
                "max_queue_size": MAX_QUEUE_SIZE,
                "critical_queue_size": CRITICAL_QUEUE_SIZE,
                "suggestion": "当前系统处理大量任务，建议2小时后重试或联系管理员"
            }
        )
    elif queue_status['waiting_tasks'] >= MAX_QUEUE_SIZE:
        # Over the standard cap but under critical: warn, still accept.
        logger.warning(f"任务队列超过标准大小 ({queue_status['waiting_tasks']}/{MAX_QUEUE_SIZE})，但仍接受任务")
    
    # Unique task id.
    task_id = str(uuid.uuid4())
    
    # Initialize the shared task record before enqueueing.
    with tasks_lock:
        tasks_storage[task_id] = {
            'status': TaskStatus.PENDING,
            'progress': 0,
            'result': None,
            'error': None,
            'created_at': datetime.now().isoformat(),
            'completed_at': None,
            'started_at': None,
            'segments_count': len(request.content_segments),
            'queue_position': queue_status['waiting_tasks'] + 1  # position at enqueue time
        }
    invalidate_stats_cache()
    
    # Hand off via the queue instead of spawning a thread per request.
    async_task_queue.put((request, task_id))
    
    # Estimate the wait time for the response message.
    if queue_status['available_slots'] > 0:
        estimated_wait_time = "立即开始处理"
    else:
        # Divide by the concurrency level: more workers, shorter waits.
        estimated_minutes = max(1, queue_status['waiting_tasks'] * 3 // MAX_CONCURRENT_ASYNC_TASKS)
        if estimated_minutes < 60:
            estimated_wait_time = f"预计等待 {estimated_minutes} 分钟"
        else:
            estimated_hours = estimated_minutes // 60
            estimated_wait_time = f"预计等待 {estimated_hours} 小时 {estimated_minutes % 60} 分钟"
    
    # Optional hint appended when the queue is over the standard cap.
    queue_status_message = ""
    if queue_status['waiting_tasks'] >= MAX_QUEUE_SIZE:
        queue_status_message = " (队列较满，处理时间可能较长)"
    
    logger.info(f"异步任务 {task_id} 已加入队列，队列位置: {queue_status['waiting_tasks'] + 1}")
    return TaskResponse(
        task_id=task_id,
        status=TaskStatus.PENDING,
        message=f"任务已创建并加入队列，{estimated_wait_time}{queue_status_message}"
    )

@app.get("/api/reports/task-status/{task_id}", response_model=TaskStatusResponse)
def get_task_status(task_id: str):
    """Look up the current state of one task by id (404 if unknown)."""
    # Fetch statistics up front: get_task_statistics must not be invoked
    # while tasks_lock is held.
    stats = get_task_statistics()

    with tasks_lock:
        task_data = tasks_storage.get(task_id)
        if task_data is None:
            raise HTTPException(status_code=404, detail="任务不存在")

        # How long the task has been (or was) running.
        elapsed = calculate_duration(
            task_data['created_at'],
            task_data.get('completed_at'),
            task_data.get('started_at'),
        )

        # ETA derived from the pre-fetched average duration.
        eta = estimate_completion_time(task_data, stats.average_duration_seconds)

        return TaskStatusResponse(
            task_id=task_id,
            status=task_data['status'],
            progress=task_data['progress'],
            result=task_data['result'],
            error=task_data['error'],
            created_at=task_data['created_at'],
            completed_at=task_data['completed_at'],
            estimated_completion_time=eta,
            duration_seconds=elapsed,
        )

@app.get("/api/reports/statistics", response_model=TaskStatistics)
def get_statistics():
    """Return aggregate statistics across all tracked tasks."""
    logger.info("Fetching task statistics")
    stats = get_task_statistics()
    return stats

@app.get("/api/reports/queue-status")
def get_queue_status():
    """Expose the async task queue snapshot over HTTP."""
    logger.info("Fetching async task queue status")
    snapshot = get_async_queue_status()
    return snapshot

@app.get("/api/reports/tasks")
def list_tasks(status: Optional[str] = None, limit: int = 50):
    """List tracked tasks, optionally filtered by status, newest first."""
    # Grab stats (average duration) before taking tasks_lock, to avoid
    # calling get_task_statistics while holding the lock.
    stats = get_task_statistics()

    with tasks_lock:
        summaries = []
        for tid, data in tasks_storage.items():
            # Skip entries that don't match the requested status filter.
            if status and data['status'] != status:
                continue

            summary = TaskSummary(
                task_id=tid,
                status=data['status'],
                progress=data.get('progress'),
                created_at=data['created_at'],
                completed_at=data.get('completed_at'),
                duration_seconds=calculate_duration(
                    data['created_at'],
                    data.get('completed_at'),
                    data.get('started_at'),
                ),
                segments_count=data.get('segments_count'),
            )

            # Only in-flight (running/pending) tasks get an ETA.
            if data['status'] in [TaskStatus.RUNNING, TaskStatus.PENDING]:
                summary.estimated_completion_time = estimate_completion_time(
                    data, stats.average_duration_seconds
                )

            summaries.append(summary)

        # Newest first.
        summaries.sort(key=lambda s: s.created_at, reverse=True)

        return {
            "tasks": summaries[:limit],
            "total": len(summaries),
            "filtered_by_status": status,
        }

# RAG搜索相关端点
@app.get("/api/rag/search")
def rag_search(query: str, top_k: int = 10):
    """Search the RAG knowledge base for functions relevant to *query*."""
    try:
        from .rag_knowledge_base import get_rag_knowledge_base
        hits = get_rag_knowledge_base().search_relevant_functions(query, top_k=top_k)
        return {
            "query": query,
            "results": hits,
            "count": len(hits),
        }
    except Exception as e:
        logger.error(f"RAG搜索失败: {e}")
        raise HTTPException(status_code=500, detail=f"RAG搜索失败: {str(e)}")

@app.post("/api/rag/refresh")
def refresh_rag_knowledge_base():
    """Fully rebuild the RAG knowledge base.

    Returns:
        dict: success message when the rebuild completes.

    Raises:
        HTTPException: 500 when the rebuild reports failure or errors out.
    """
    try:
        from .rag_knowledge_base import get_rag_knowledge_base
        rag_kb = get_rag_knowledge_base()
        success = rag_kb.refresh_knowledge_base()
        if success:
            return {"message": "知识库全量刷新成功"}
        else:
            raise HTTPException(status_code=500, detail="知识库刷新失败")
    except HTTPException:
        # Bug fix: the deliberate 500 above was previously caught by the
        # generic handler below and re-wrapped with a different detail
        # message; re-raise it unchanged instead.
        raise
    except Exception as e:
        logger.error(f"刷新知识库失败: {e}")
        raise HTTPException(status_code=500, detail=f"刷新知识库失败: {str(e)}")

@app.post("/api/rag/update")
def update_rag_knowledge_base():
    """Incrementally update the RAG knowledge base.

    Logs the embeddings-file location for debugging, then reloads the
    function knowledge base in place.

    Returns:
        dict: success message when the incremental update completes.

    Raises:
        HTTPException: 500 when the update reports failure or errors out.
    """
    try:
        from .rag_knowledge_base import get_rag_knowledge_base
        rag_kb = get_rag_knowledge_base()
        
        # Debug aid: show whether/where the embeddings file exists.
        logger.info(f"RAG更新请求 - 嵌入文件存在: {rag_kb.embeddings_file.exists()}")
        logger.info(f"RAG更新请求 - 嵌入文件路径: {rag_kb.embeddings_file}")
        
        success = rag_kb.load_function_knowledge_base()
        if success:
            logger.info("✅ 知识库增量更新成功")
            return {"message": "知识库增量更新成功"}
        else:
            logger.error("❌ 知识库增量更新失败")
            raise HTTPException(status_code=500, detail="知识库增量更新失败")
    except HTTPException:
        # Bug fix: the deliberate 500 above was previously caught by the
        # generic handler below and re-wrapped; re-raise it unchanged.
        raise
    except Exception as e:
        logger.error(f"增量更新知识库失败: {e}")
        raise HTTPException(status_code=500, detail=f"增量更新知识库失败: {str(e)}")

class RemoveFunctionRequest(BaseModel):
    """Request body for /api/rag/remove-function."""
    # Name of the function whose vector embedding should be removed.
    function_name: str

@app.post("/api/rag/remove-function")
def remove_function_from_knowledge_base(request: RemoveFunctionRequest):
    """Remove one function's vector embedding from the knowledge base."""
    try:
        from .rag_knowledge_base import get_rag_knowledge_base
        kb = get_rag_knowledge_base()
        removed = kb.remove_function_embedding(request.function_name)
        # A falsy result means the function was absent or removal failed;
        # both cases are reported as a normal response, not an error.
        if removed:
            return {"message": f"已从知识库中删除函数 '{request.function_name}' 的向量嵌入"}
        return {"message": f"函数 '{request.function_name}' 在知识库中不存在或删除失败"}
    except Exception as e:
        logger.error(f"删除函数向量嵌入失败: {e}")
        raise HTTPException(status_code=500, detail=f"删除函数向量嵌入失败: {str(e)}")


@app.post("/api/rag/clear")
def clear_rag_knowledge_base():
    """Wipe the RAG knowledge base files on disk (no reload afterwards)."""
    try:
        from .rag_knowledge_base import get_rag_knowledge_base
        kb = get_rag_knowledge_base()

        # Files that back the knowledge base on disk.
        candidates = [
            kb.embeddings_file,
            kb.knowledge_base_dir / "function_knowledge_base.json",
            kb.knowledge_base_dir / "function_knowledge_base.txt",
        ]

        deleted_files = []
        for path in candidates:
            if not path.exists():
                continue
            path.unlink()
            deleted_files.append(path.name)
            logger.info(f"已删除知识库文件: {path.name}")

        # Drop the in-memory copy as well.
        kb.functions_df = None

        return {"message": f"知识库已清空，删除了 {len(deleted_files)} 个文件", "deleted_files": deleted_files}
    except Exception as e:
        logger.error(f"清空知识库失败: {e}")
        raise HTTPException(status_code=500, detail=f"清空知识库失败: {str(e)}")

# 系统健康检查和统计端点
@app.get("/api/system/health")
def system_health():
    """Health probe: RAG status, memory, agent pool size and task count."""
    try:
        # Probe the RAG knowledge base; any failure maps to "error".
        try:
            from .rag_knowledge_base import get_rag_knowledge_base
            kb = get_rag_knowledge_base()
            has_data = kb.functions_df is not None and not kb.functions_df.empty
            rag_status = "healthy" if has_data else "no_data"
        except Exception:
            rag_status = "error"

        # Current process memory snapshot.
        memory_info = monitor_memory_usage()

        return {
            "status": "healthy",
            "service": "Agent Client",
            "version": "2.0.0",
            "rag_status": rag_status,
            "memory_info": memory_info,
            "agent_pool_size": agent_pool.qsize(),
            "tasks_count": len(tasks_storage),
        }
    except Exception as e:
        logger.error(f"健康检查失败: {e}")
        return {
            "status": "unhealthy",
            "error": str(e),
        }

@app.get("/api/tasks/stats")
def get_task_stats():
    """Simplified per-status task counts."""
    try:
        s = get_task_statistics()
        return {
            "total": s.total_tasks,
            "running": s.running_tasks,
            "completed": s.completed_tasks,
            "pending": s.pending_tasks,
            "failed": s.failed_tasks,
        }
    except Exception as e:
        logger.error(f"获取任务统计失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取任务统计失败: {str(e)}")

# Reference to the background worker thread that drains the async task
# queue; created once by startup_initialization().
async_task_worker_thread = None

# One-time application-init guard; protected by _app_init_lock so that
# concurrent startup calls initialize at most once.
_app_initialized = False
_app_init_lock = threading.Lock()

# 应用启动时初始化
def startup_initialization():
    """One-time application initialization (agent pool + background threads).

    Safe against concurrent or repeated calls: guarded by _app_init_lock
    and the _app_initialized flag. Failures are logged but deliberately not
    re-raised, so the web app can still start in a degraded state.
    """
    global async_task_worker_thread, _app_initialized
    
    with _app_init_lock:
        if _app_initialized:
            logger.debug("应用已经初始化，跳过重复初始化")
            return
        
        logger.info("🚀 开始初始化MCP Agent Client应用...")
        
        try:
            # 1. Pre-create the agent pool.
            init_agent_pool()
            
            # 2. Start the async task worker thread (once per process).
            if async_task_worker_thread is None:
                async_task_worker_thread = threading.Thread(target=async_task_worker)
                async_task_worker_thread.daemon = True
                async_task_worker_thread.start()
                logger.info("📋 异步任务工作线程已启动")
            
            # 3. Start the periodic background cleanup thread.
            cleanup_thread = threading.Thread(target=periodic_cleanup)
            cleanup_thread.daemon = True
            cleanup_thread.start()
            logger.info("🧹 后台清理线程已启动")
            
            # 4. Log the effective system configuration.
            current_queue_size = int(os.getenv("MAX_ASYNC_QUEUE_SIZE", str(MAX_QUEUE_SIZE_DEFAULT)))
            logger.info("📊 系统配置:")
            logger.info(f"   ├─ 最大并发异步任务: {MAX_CONCURRENT_ASYNC_TASKS}")
            logger.info(f"   ├─ 全局LLM调用限制: {MAX_GLOBAL_LLM_CALLS}")
            logger.info(f"   ├─ 标准队列大小: {current_queue_size}")
            logger.info(f"   ├─ 临界队列大小: {current_queue_size + 100}")
            logger.info(f"   ├─ Agent池获取超时: {AGENT_POOL_TIMEOUT}秒")
            logger.info(f"   └─ Agent池大小: {MAX_GLOBAL_LLM_CALLS}")
            
            if current_queue_size >= 200:
                logger.info("💡 大容量队列配置已启用，支持大量任务排队处理")
            
            _app_initialized = True
            logger.info("✅ MCP Agent Client应用初始化完成")
            
        except Exception as e:
            logger.error(f"❌ 应用初始化失败: {e}")
            # Deliberately not re-raised: let the app start, even though
            # some functionality will be impaired.

# 应用关闭时清理
def shutdown_cleanup():
    """Graceful shutdown: stop the worker, drain running tasks, close pools.

    Sends the None sentinel to the async task queue, waits up to 30 seconds
    for in-flight async tasks to finish (polling once per second), then
    shuts down the global thread pool. Errors are logged, never raised.
    """
    logger.info("开始关闭应用...")
    
    try:
        # Ask the worker loop to exit.
        logger.info("正在停止异步任务工作线程...")
        async_task_queue.put(None)  # shutdown sentinel
        
        # Wait (bounded, max 30s) for currently running async tasks.
        with async_queue_lock:
            running_tasks_count = len(running_async_tasks)
        
        if running_tasks_count > 0:
            logger.info(f"等待 {running_tasks_count} 个运行中的异步任务完成...")
            wait_time = 0
            max_wait_time = 30  # cap the wait at 30 seconds
            
            while wait_time < max_wait_time:
                with async_queue_lock:
                    if len(running_async_tasks) == 0:
                        break
                time.sleep(1)
                wait_time += 1
            
            with async_queue_lock:
                remaining_tasks = len(running_async_tasks)
            if remaining_tasks > 0:
                logger.warning(f"强制关闭应用，仍有 {remaining_tasks} 个任务在运行")
            else:
                logger.info("所有异步任务已完成")
        
        # Shut down the global thread pool (waits for queued work).
        global_executor.shutdown(wait=True)
        logger.info("全局线程池已关闭")
        
    except Exception as e:
        logger.error(f"关闭应用时出错: {e}")
    
    logger.info("应用关闭完成")

# 初始化将在FastAPI启动事件中执行，不在模块加载时执行

if __name__ == '__main__':
    import signal
    import sys
    
    def signal_handler(sig, frame):
        """Handle SIGINT/SIGTERM: run cleanup, then exit with status 0."""
        logger.info(f"接收到信号 {sig}，开始关闭应用...")
        shutdown_cleanup()
        sys.exit(0)
    
    # Install handlers so Ctrl-C / kill trigger a graceful shutdown.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    
    try:
        logger.info("启动FastAPI应用...")
        # Serve on all interfaces, port 8080.
        uvicorn.run(app, host="0.0.0.0", port=8080)
    except KeyboardInterrupt:
        logger.info("接收到键盘中断信号")
    except Exception as e:
        logger.error(f"应用运行时发生错误: {e}")
    finally:
        # Cleanup runs on every exit path (normal, interrupt, or error).
        shutdown_cleanup()