import sys
import os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))

import logging
from datetime import datetime
from shared.utils.MySQLUtil import MySQLUtil
from fastmcp import FastMCP
from fastmcp.exceptions import ToolError
from fastmcp.tools.tool import ToolAnnotations
from typing import Annotated, Dict, Any, Optional
from pydantic import Field
import json
from starlette.requests import Request
from starlette.responses import JSONResponse
import sys
from pathlib import Path
import asyncio
from concurrent.futures import ThreadPoolExecutor
# 添加 mcp_generator 路径
mcp_generator_path = Path(__file__).parent.parent.parent / "services" / "mcp-generator"
sys.path.append(str(mcp_generator_path))
from mcp_generator.function_manager import FunctionManager

# 新增导入
from shared.utils.CodeExecutor import execute as execute_code, CodeExecutionError
from shared.config import llm_config
import dashscope
from http import HTTPStatus
from decimal import Decimal

# Thread pool used to run synchronous (blocking) LLM calls off the asyncio
# event loop; async tools hand work to it via run_in_executor.
_llm_executor = ThreadPoolExecutor(max_workers=10, thread_name_prefix="llm_call")


def _decimal_default(obj):
    """处理 JSON 序列化时的 Decimal 类型转换"""
    if isinstance(obj, Decimal):
        return float(obj)
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")


def _get_text_from_llm_response(response) -> Optional[str]:
    """安全地从DashScope LLM响应中提取文本内容。"""
    if not response:
        return None
    try:
        # 优先尝试 qwen-plus 等模型的结构
        if response.output and response.output.choices and response.output.choices[0].message:
            return response.output.choices[0].message.content
        # 其次尝试通用文本模型的结构
        if response.output and response.output.text:
            return response.output.text
    except (AttributeError, IndexError):
        # 如果结构不匹配，记录警告并尝试备用方案
        logger.warning("无法从标准响应结构中提取文本，尝试直接访问 'text' 字段。")
        pass
    
    # 最后的备用方案
    try:
        if response.output and response.output.get("text"):
            return response.output.get("text")
    except Exception:
        pass
        
    logger.error(f"无法从LLM响应中提取有效的文本内容。响应: {response}")
    return None

# Initialize FastMCP server with explicit configuration
# - name: Human-readable server identifier
# - mask_error_details: Security setting to control error message exposure
#   (hides internal error details from clients)
mcp = FastMCP(
    name="xjcmcpServer",
    mask_error_details=True,
    host="0.0.0.0", port=8000
)

# Shared MySQL helper used by every tool and endpoint in this module.
db = MySQLUtil()

# Module-level logger; basicConfig here is acceptable because this module
# is the process entry point (see the __main__ block at the bottom).
logger = logging.getLogger("xjcmcp")
logging.basicConfig(level=logging.INFO)

# Initialize function manager: registry of dynamically generated indicator
# functions, loaded from the JSON file that sits next to this module.
function_registry_path = os.path.join(os.path.dirname(__file__), "function_registry.json")
function_manager = FunctionManager(function_registry_path)

# Health check endpoint using FastMCP custom route
@mcp.custom_route("/health", methods=["GET"])
async def health_check(request: Request) -> JSONResponse:
    """Health check endpoint for monitoring server status"""
    try:
        # Test database connection
        db.test_connection()
        return JSONResponse({"status": "healthy", "timestamp": datetime.now().isoformat()})
    except Exception as e:
        logger.error(f"Health check failed: {e}")
        return JSONResponse({"status": "unhealthy", "error": str(e)}, status_code=500)

# Unified MCP tool that can call any registered function
# Unified MCP tool that can call any registered function
@mcp.tool(
    name="call_indicator_function",
    description="统一的指标数据获取工具。可以调用任何已注册的指标计算函数。通过function_name参数指定要调用的函数，通过parameters字典传递函数所需的参数。",
    annotations=ToolAnnotations(
        title="指标数据获取工具",
        readOnlyHint=True,
        idempotentHint=True
    )
)
def call_indicator_function(
    function_name: Annotated[str, Field(description="要调用的函数名称（指标名称）")],
    parameters: Annotated[Dict[str, Any], Field(description="函数参数字典，包含所有需要传递给函数的参数")] = None
) -> dict:
    """
    Unified indicator-data retrieval tool.

    Dispatches to any function registered in the function manager.

    Args:
        function_name: Name of the function to call (usually an indicator name).
        parameters: Dict of arguments to forward to the function; may be None.

    Returns:
        dict: The result of the invoked function.

    Raises:
        ToolError: If the function does not exist or its execution fails.
    """
    try:
        logger.info(f"调用指标函数: {function_name}, 参数: {parameters}")

        # Verify the function is registered before attempting the call.
        function_info = function_manager.get_function_info(function_name)
        if not function_info:
            available_functions = list(function_manager.functions.keys())
            raise ToolError(f"函数 '{function_name}' 不存在。可用函数: {available_functions}")

        # Work on a copy so the caller-supplied dict is never mutated when
        # we inject the shared database handle.
        call_params = dict(parameters) if parameters else {}
        call_params['db'] = db

        result = function_manager.call_function(function_name, **call_params)

        logger.info(f"函数 {function_name} 执行成功")
        return result

    except ToolError:
        # Re-raise ToolErrors (e.g. unknown function) unchanged instead of
        # letting the blanket handler below re-wrap them with a misleading
        # "execution failed" message.
        raise
    except ValueError as e:
        logger.error(f"函数调用错误: {str(e)}")
        raise ToolError(str(e))
    except Exception as e:
        logger.error(f"函数 {function_name} 执行异常: {str(e)}", exc_info=True)
        raise ToolError(f"函数 '{function_name}' 执行失败: {str(e)}")

# Table-processing tool: process_and_update_table
@mcp.tool(
    name="process_and_update_table",
    description="专门处理报告中的表格（table）类型片段。接收原始表格片段和通用参数，返回数据更新后的完整表格片段。",
    annotations=ToolAnnotations(
        title="表格片段更新工具",
        readOnlyHint=False,
        idempotentHint=False
    )
)
async def process_and_update_table(
    original_segment: Annotated[Dict[str, Any], Field(description="报告中原始的 `type` 为 `table` 的 segment 对象")],
    common_params: Annotated[Dict[str, Any], Field(description="报告的通用参数，如 project_id 等")],
    function_name_to_call: Annotated[str, Field(description="由主 Agent 选定的、用于获取此表格数据的确切指标函数名称")],
    parameters_for_call: Annotated[Dict[str, Any], Field(description="调用上述指标函数所需的、已由主 Agent 准备好的参数字典")]
) -> dict:
    """
    Refresh a table segment's tbody with newly fetched indicator data.

    Receives a table segment plus an explicit instruction (which indicator
    function to call), then dynamically generates and executes a data
    transformation script, and finally returns the updated segment.

    Args:
        original_segment: The original table segment dict.
        common_params: Report-wide common parameters.
        function_name_to_call: Exact name of the indicator function to call.
        parameters_for_call: Arguments prepared by the main Agent for the call.

    Returns:
        The segment dict with its tbody replaced.

    Raises:
        ToolError: If data retrieval, code generation, or execution fails.
    """
    try:
        table_name = original_segment.get("name", "")
        logger.info(f"开始处理表格 '{table_name}'，使用指定的函数 '{function_name_to_call}'。")

        # Step 1: call the designated indicator function to fetch raw data.
        # Merge common and call-specific parameters; make sure the shared
        # db handle is passed along.
        final_params = common_params.copy()
        final_params.update(parameters_for_call)
        if 'db' not in final_params:
            final_params['db'] = db

        logger.info(f"为表格 '{table_name}' 调用底层指标函数: '{function_name_to_call}'，参数: {json.dumps(parameters_for_call, ensure_ascii=False, indent=2)}")
        raw_data = function_manager.call_function(function_name_to_call, **final_params)
        raw_data_str_for_prompt = json.dumps(raw_data, indent=2, ensure_ascii=False, default=_decimal_default)

        # Step 2: build the prompt and ask the dedicated code-generation LLM
        # for a transformation script.
        prompt = f"""
# Role: Python Data Transformation Script Generator

# Task:
You are an expert Python programmer. Your task is to write a Python script that transforms raw JSON data into a new `tbody` structure for an HTML table, following a set of strict rules.

# Key Rules & Constraints:
0.  **Data Selection (Most Critical First Step)**: The raw data may contain multiple data groups (e.g., for '博士', '硕士', '总体'). Your first and most important task is to select the correct data group to process based on the table's context.
    -   Analyze the `table_name` ("{table_name}"). If it explicitly asks for "总体" (Overall), you **MUST** find the object in the raw data where `group == '总体'` and use **only that data** for this table.
    -   Conversely, if the `table_name` asks for a breakdown by categories like "各学历" (By Education Level), you should process the data for each of those specific education levels and likely **ignore** the '总体' data group.
    -   Use the `thead` as a crucial clue. A simple header like `["...", "人数", "占比"]` strongly implies you should be using the '总体' data. A complex header with repeating columns for '博士', '硕士', etc., confirms you need to process the individual groups.
1.  **Structural Mimicry (MOST IMPORTANT)**: The new `tbody`'s structure must mimic the `original_tbody`. Pay close attention to structural patterns, such as the presence of subtotal rows (e.g., rows containing "小计"). If the original table has subtotal rows for each group (like a department), your generated script **MUST** calculate and include similar subtotal rows in the new table.
2.  **Intelligent Formatting (CRITICAL)**: The data format in each cell of the new `tbody` MUST precisely mirror the format of the corresponding column in the `original_tbody`.
    - **However, you MUST be intelligent**: If the `original_tbody` contains obvious placeholder or junk data (e.g., `200.00%`, `00`, `*` ), you **MUST IGNORE** that specific junk value and instead apply the **correct calculation method** based on the raw data.
    - For percentages, the correct method is typically `f"{{value * 100:.2f}}%"`. Do not blindly copy incorrect percentage formats.
3.  **Data Fidelity**: Only use the data types present in the `original_tbody`. For example, if a column in the original table contains only numbers (e.g., `["123"]`), your script must output only numbers in that column, even if the raw data provides additional details like percentages. Do NOT add new information that wasn't in the original format.
4.  **Mandatory Sorting Rules**: 
    - **Education Level Sorting**: If the raw data involves education levels ('学历'), the final `tbody` rows **MUST** be sorted in the following descending order: '博士', '硕士', '本科', '专科'.
    - **Numerical Value Sorting**: When the table contains numerical data (e.g., counts, rates, amounts), rows should generally be sorted by the primary numerical column in **descending order** (from largest to smallest), unless the `original_tbody` clearly demonstrates a different sorting pattern (e.g., alphabetical by name). Analyze the `original_tbody` carefully to determine the appropriate sorting key and direction.
    - Your script must include logic to handle the appropriate sorting based on the table's structure and content.
5.  **Structural Integrity**: The number of columns in each row of your new `tbody` must exactly match the number of columns in the `thead`.
6.  **Input/Output**: The script MUST read the raw JSON data from standard input (`sys.stdin`) and its ONLY output must be the new `tbody` (as a list of lists), serialized as a JSON string, printed to standard output (`print(json.dumps(new_tbody, ensure_ascii=False))`).

# Raw Data to be Processed:
```json
{raw_data_str_for_prompt}
```

# Target Table Structure & Format Reference:
- `thead`: {json.dumps(original_segment.get("thead", []), ensure_ascii=False)}
- `original_tbody` (provides full structural and formatting context): {json.dumps(original_segment.get("tbody", []), ensure_ascii=False)}

# Your Python Code:
"""

        logger.info(f"为函数 '{function_name_to_call}' 生成代码的 Prompt 核心内容:\n# Raw Data to be Processed:\n```json\n{raw_data_str_for_prompt[:1500]}...\n```\n\n# Target Table Structure & Format Reference:\n- `thead`: {json.dumps(original_segment.get('thead', []), ensure_ascii=False)}\n- `original_tbody`: {json.dumps(original_segment.get('tbody', []), ensure_ascii=False)[:1500]}...")

        # Run the synchronous DashScope call in the thread pool so it does
        # not block the event loop. get_running_loop() is the supported way
        # to get the loop from inside a coroutine (get_event_loop() here is
        # deprecated since Python 3.10).
        dashscope.api_key = llm_config.DASHSCOPE_API_KEY
        loop = asyncio.get_running_loop()
        response = await loop.run_in_executor(
            _llm_executor,
            lambda: dashscope.Generation.call(
                model=llm_config.AGENT_CLIENT_MODEL,
                prompt=prompt,
            )
        )

        if response.status_code != HTTPStatus.OK:
            raise ToolError(f"代码生成LLM调用失败: {response.message}")

        generated_code = _get_text_from_llm_response(response)

        if not generated_code:
            raise ToolError(f"从LLM响应中未能提取任何代码内容。响应详情: {response}")

        # Strip optional Markdown code fences. LLMs emit both ```python and
        # bare ``` opening fences; the closing fence is stripped in either
        # case (previously it was only stripped after a ```python opener,
        # leaving broken code when the model used a bare fence).
        generated_code = generated_code.strip()
        if generated_code.startswith("```python"):
            generated_code = generated_code[len("```python"):].strip()
        elif generated_code.startswith("```"):
            generated_code = generated_code[len("```"):].strip()
        if generated_code.endswith("```"):
            generated_code = generated_code[:-3].strip()

        logger.info(f"生成的代码:\n{generated_code}")

        # Step 3: execute the generated code safely (off the event loop,
        # reusing the same thread pool).
        raw_data_str_for_execution = json.dumps(raw_data, ensure_ascii=False, default=_decimal_default)
        logger.info("正在执行生成的数据转换代码...")
        new_tbody_str = await loop.run_in_executor(
            _llm_executor,
            lambda: execute_code(generated_code, raw_data_str_for_execution)
        )

        # Step 4: parse the result and return the updated segment.
        new_tbody = json.loads(new_tbody_str)

        updated_segment = original_segment.copy()
        updated_segment["tbody"] = new_tbody

        logger.info(f"表格 '{table_name}' 更新成功。")
        return updated_segment

    except ToolError:
        # Propagate ToolErrors raised above (LLM failure, empty code) as-is
        # instead of re-wrapping them as "unknown error" below.
        raise
    except CodeExecutionError as e:
        error_msg = f"执行为表格 '{original_segment.get('name', 'N/A')}' 生成的代码时失败: {e}"
        logger.error(f"{error_msg}\nStderr: {e.stderr}")
        raise ToolError(error_msg)
    except Exception as e:
        error_msg = f"处理表格 '{original_segment.get('name', 'N/A')}' 时发生未知错误: {str(e)}"
        logger.error(error_msg, exc_info=True)
        raise ToolError(error_msg)

# Function list endpoint for RAG knowledge base
@mcp.custom_route("/function_list", methods=["GET"])
async def get_function_list(request: Request) -> JSONResponse:
    """Get function list for RAG knowledge base"""
    try:
        functions = function_manager.get_function_list()
        payload = {
            "status": "success",
            "data": functions,
            "count": len(functions),
            "timestamp": datetime.now().isoformat(),
        }
        return JSONResponse(payload)
    except Exception as exc:
        logger.error(f"Failed to get function list: {exc}")
        return JSONResponse({"status": "error", "error": str(exc)}, status_code=500)

# Function list text endpoint for RAG knowledge base
@mcp.custom_route("/function_list_text", methods=["GET"])
async def get_function_list_text(request: Request) -> JSONResponse:
    """Get function list as formatted text for RAG knowledge base"""
    try:
        text = function_manager.get_function_list_text()
        payload = {
            "status": "success",
            "data": text,
            "timestamp": datetime.now().isoformat(),
        }
        return JSONResponse(payload)
    except Exception as exc:
        logger.error(f"Failed to get function list text: {exc}")
        return JSONResponse({"status": "error", "error": str(exc)}, status_code=500)

# Force reload functions endpoint (for compatibility with MCP Generator notifications)
@mcp.custom_route("/force-reload-functions", methods=["POST"])
async def force_reload_functions(request: Request) -> JSONResponse:
    """Force reload functions from registry (for MCP Generator notifications)"""
    try:
        # Re-read the registry file and swap in the fresh function table.
        function_manager.functions = function_manager._load_registry()
        function_count = len(function_manager.functions)
        logger.info(f"✅ 已重新加载 {function_count} 个函数")
        payload = {
            "message": f"已重新加载 {function_count} 个函数",
            "count": function_count,
            "timestamp": datetime.now().isoformat(),
        }
        return JSONResponse(payload)
    except Exception as exc:
        logger.error(f"❌ 重新加载函数失败: {exc}")
        return JSONResponse({"message": f"重新加载函数失败: {str(exc)}"}, status_code=500)

# Server execution block
if __name__ == "__main__":
    # Server entry point: run the MCP server over the Server-Sent Events
    # (SSE) transport. FastMCP provides tool registration, validation, and
    # error handling; Ctrl+C shuts the server down gracefully.
    try:
        mcp.run(transport="sse")
    except KeyboardInterrupt:
        logger.info("Shutting down MCP server...")