#!/usr/bin/env python3
"""
Hydrology Forecast Analysis Agent using MCP Server.

This example demonstrates how to use the MCP server for analyzing
hydrology forecast data with LLM-powered natural language queries.
"""

import asyncio
import sys
import os

# Add the project root directory to the Python path
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# Import Tool (compatible with langchain 1.x)
try:
    from langchain_core.tools import StructuredTool as Tool
except ImportError:
    try:
        from langchain.tools import Tool
    except ImportError:
        from langchain_core.tools import tool as Tool

# Import agent components (compatible with langchain 1.x)
# Import each piece separately so one failure does not block the others
try:
    from langchain.agents import AgentExecutor
except ImportError:
    from langchain.agents.agent import AgentExecutor

try:
    from langchain.agents import create_tool_calling_agent
    create_agent_func = create_tool_calling_agent
except ImportError:
    try:
        from langchain.agents import create_openai_tools_agent
        create_agent_func = create_openai_tools_agent
    except ImportError:
        from langchain.agents import create_react_agent
        create_agent_func = create_react_agent


# ==================== Configuration ====================

# vLLM Configuration: points at a local OpenAI-compatible endpoint served
# by vLLM; the model name must match the one the server was launched with.
VLLM_CONFIG = {
    "base_url": "http://localhost:8000/v1",
    "model_name": "Qwen/Qwen2.5-3B-Instruct",
    "temperature": 0.7,
    "max_tokens": 2048,
}

# Default session ID (can be overridden)
# Used when callers do not pass an explicit forecast session to analyze.
DEFAULT_SESSION_ID = "daa0d8a6-dfb3-4887-b798-366f4682c453"


# ==================== MCP Tools Wrapper ====================

# Note: These are wrapper functions that call the MCP server
# In production, you would use actual MCP client to communicate with the server
# For now, we'll import and use the analyzer directly

from src.mcp.forecast_client import ForecastAPIClient
from src.mcp.forecast_analyzer import ForecastAnalyzer


async def get_forecast_session_info(session_id: str) -> str:
    """Fetch and format basic information about a forecast session.

    Args:
        session_id: Identifier of the forecast session to inspect.

    Returns:
        A human-readable (Chinese) summary of the session metadata and
        the model's initial-state parameters.
    """
    api = ForecastAPIClient()
    raw = await api.fetch_forecast_data(session_id)
    fa = ForecastAnalyzer(raw)

    session_info = fa.get_session_info()
    model_params = fa.get_model_parameters()
    # Hoist the nested lookup so the template reads each key from one dict.
    initial = model_params.get('initial_state', {})

    return f"""
预报会话信息：
- 会话ID: {session_info['session_id']}
- 站点: {session_info['station_name']} ({session_info['station_code']})
- 模型类型: {session_info['model_type']}
- 模型链: {session_info['model_chain']}
- 假拟预报数量: {session_info['num_hypothetical']}

模型参数：
- 模型名称: {model_params.get('model_name')}
- 初始状态: SA0={initial.get('SA0')}, UA0={initial.get('UA0')}, YA0={initial.get('YA0')}
"""


async def analyze_flood_metrics(session_id: str, forecast_type: str = "all") -> str:
    """Summarize flood metrics (volume, peak, time-to-peak) for a session.

    Args:
        session_id: Identifier of the forecast session.
        forecast_type: Which series to report — 'real', 'hypothetical',
            or 'all' (default).

    Returns:
        Formatted metric summaries, or a notice string when no valid
        forecast data was found.
    """
    api = ForecastAPIClient()
    analyzer = ForecastAnalyzer(await api.fetch_forecast_data(session_id))

    sections = []
    want_real = forecast_type in ("real", "all")
    want_hyp = forecast_type in ("hypothetical", "all")

    if want_real:
        m = analyzer.analyze_real_forecast()["metrics"]
        # Metrics dicts carry an "error" key instead of raising.
        if "error" not in m:
            sections.append(f"""
真实预报指标：
- 洪量: {m['flood_volume_m3']:,.2f} m³
- 洪峰流量: {m['peak_discharge_m3s']:.2f} m³/s
- 峰现时间: {m['peak_time']}
- 平均流量: {m['avg_discharge_m3s']:.2f} m³/s
- 数据点数: {m['num_points']}
- 预报时段: {m['start_time']} 至 {m['end_time']}
""")

    if want_hyp:
        for idx, hyp in enumerate(analyzer.analyze_all_hypothetical()):
            m = hyp["metrics"]
            if "error" in m:
                continue
            sections.append(f"""
假拟预报 #{idx} ({hyp['name']}):
- 洪量: {m['flood_volume_m3']:,.2f} m³
- 洪峰流量: {m['peak_discharge_m3s']:.2f} m³/s
- 峰现时间: {m['peak_time']}
- 平均流量: {m['avg_discharge_m3s']:.2f} m³/s
""")

    return "\n".join(sections) if sections else "未找到有效的预报数据"


async def compare_forecasts_tool(session_id: str) -> str:
    """Compare the real forecast against every hypothetical forecast.

    Args:
        session_id: Identifier of the forecast session.

    Returns:
        A formatted report of volume / peak / peak-time differences
        (absolute and percentage), or an error string.
    """
    api = ForecastAPIClient()
    analyzer = ForecastAnalyzer(await api.fetch_forecast_data(session_id))

    comparison = analyzer.compare_forecasts()
    if "error" in comparison:
        return f"错误: {comparison['error']}"

    base = comparison["real_forecast"]
    parts = [f"""
真实预报基准值：
- 洪量: {base['flood_volume_m3']:,.2f} m³
- 洪峰流量: {base['peak_discharge_m3s']:.2f} m³/s
- 峰现时间: {base['peak_time']}

假拟预报对比（共 {comparison['num_hypothetical']} 个）：
"""]

    for comp in comparison["comparisons"]:
        # Per-entry errors are reported inline and skipped.
        if "error" in comp:
            parts.append(f"\n{comp['name']}: {comp['error']}")
            continue

        diff = comp["differences"]
        vol = diff['flood_volume']
        peak = diff['peak_discharge']
        pt = diff['peak_time']
        # Pre-compute the peak-time annotation to keep the template flat.
        time_note = '(相同)' if pt['same'] else f'(真实: {pt["real"]})'
        parts.append(f"""
{comp['name']}:
  洪量差异: {vol['difference_m3']:+,.2f} m³ ({vol['difference_percent']:+.2f}%)
  洪峰差异: {peak['difference_m3s']:+.2f} m³/s ({peak['difference_percent']:+.2f}%)
  峰现时间: {pt['hypothetical']} {time_note}
""")

    # A "note" key marks a summary that has no usable statistics.
    summary = comparison["summary"]
    if "note" not in summary:
        vr = summary['volume_diff_range']
        pr = summary['peak_diff_range']
        parts.append(f"""
统计摘要：
  洪量差异范围: {vr['min_percent']:.2f}% ~ {vr['max_percent']:.2f}% (平均: {vr['avg_percent']:.2f}%)
  洪峰差异范围: {pr['min_percent']:.2f}% ~ {pr['max_percent']:.2f}% (平均: {pr['avg_percent']:.2f}%)
""")

    return "".join(parts)


# ==================== LangChain Tools ====================

def create_hydrology_tools(session_id: str = DEFAULT_SESSION_ID):
    """Create LangChain tools for hydrology analysis.

    Each tool wraps one of the async analysis coroutines behind a
    synchronous interface so it can be driven by a (sync) AgentExecutor.

    Args:
        session_id: Forecast session the tools operate on.

    Returns:
        A list of three LangChain Tool objects.
    """

    def _run_coro(coro):
        """Run *coro* to completion whether or not a loop is already running.

        BUG FIX: the original wrappers called asyncio.run() directly, which
        raises "RuntimeError: asyncio.run() cannot be called from a running
        event loop" when the agent is invoked from async code (as main()
        does via asyncio.run(main())). If a loop is running in this thread,
        execute the coroutine on a fresh loop in a worker thread instead.
        """
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop in this thread: plain asyncio.run is safe.
            return asyncio.run(coro)
        import concurrent.futures
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(asyncio.run, coro).result()

    def sync_get_session_info(input_str: str = "") -> str:
        """Sync wrapper: fetch forecast session information."""
        return _run_coro(get_forecast_session_info(session_id))

    def sync_analyze_metrics(input_str: str = "all") -> str:
        """Sync wrapper: analyze flood metrics ('real', 'hypothetical', or 'all')."""
        return _run_coro(analyze_flood_metrics(session_id, input_str))

    def sync_compare(input_str: str = "") -> str:
        """Sync wrapper: compare forecast differences."""
        return _run_coro(compare_forecasts_tool(session_id))

    return [
        Tool.from_function(
            name="get_forecast_session_info",
            func=sync_get_session_info,
            description="""获取水文预报会话的基本信息，包括站点名称、模型类型、假拟预报数量等。
            输入: 空字符串（会使用默认session_id）
            返回: 格式化的会话信息文本
            """
        ),
        Tool.from_function(
            name="analyze_flood_metrics",
            func=sync_analyze_metrics,
            description="""分析水文预报的洪水指标（洪量、洪峰、峰现时间）。
            输入: 'real' (仅真实预报), 'hypothetical' (仅假拟预报), 或 'all' (所有预报，默认)
            返回: 详细的洪水指标分析结果
            """
        ),
        Tool.from_function(
            name="compare_forecasts",
            func=sync_compare,
            description="""对比真实预报与所有假拟预报的差异，包括洪量、洪峰和峰现时间的对比。
            输入: 空字符串
            返回: 详细的对比分析结果，包括差异的绝对值和百分比
            """
        )
    ]


# ==================== Agent Setup ====================

def create_hydrology_agent(session_id: str = DEFAULT_SESSION_ID):
    """Build an AgentExecutor wired up with the hydrology analysis tools.

    Args:
        session_id: Forecast session the agent's tools operate on.

    Returns:
        A ready-to-invoke AgentExecutor (verbose, max 5 iterations).
    """
    # vLLM does not validate API keys, so any placeholder value works.
    chat_model = ChatOpenAI(
        base_url=VLLM_CONFIG["base_url"],
        model_name=VLLM_CONFIG["model_name"],
        api_key="EMPTY",
        temperature=VLLM_CONFIG["temperature"],
        max_tokens=VLLM_CONFIG["max_tokens"],
    )

    toolset = create_hydrology_tools(session_id)

    # System prompt (Chinese): domain glossary plus tool-routing hints.
    system_message = """你是一位专业的水文预报分析专家。你可以使用以下工具来分析水文预报数据：

1. get_forecast_session_info: 获取预报会话基本信息
2. analyze_flood_metrics: 分析洪水指标（洪量、洪峰、峰现时间）
3. compare_forecasts: 对比真实预报与假拟预报的差异

请根据用户的问题，选择合适的工具进行分析，并用清晰、专业的语言解释结果。

术语说明：
- SA0: 流域蓄水初值
- UA0: 地下水初值  
- YA0: 河道蓄水初值
- 洪量: 洪水总量（m³）
- 洪峰: 最大流量（m³/s）
- 峰现时间: 洪峰出现的时间

当用户询问"洪量、洪峰、峰现时间"时，应该使用 analyze_flood_metrics 工具。
当用户询问"差异"、"对比"时，应该使用 compare_forecasts 工具。
"""

    chat_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ])

    # create_agent_func is whichever agent constructor was importable above.
    return AgentExecutor(
        agent=create_agent_func(chat_model, toolset, chat_prompt),
        tools=toolset,
        verbose=True,
        max_iterations=5,
        handle_parsing_errors=True,
    )


# ==================== Example Queries ====================

# Demo questions (Chinese) run once at startup before the interactive loop;
# they exercise each of the three tools the agent exposes.
EXAMPLE_QUERIES = [
    "这次预报的基本信息是什么？",
    "真实预报的洪量、洪峰和峰现时间分别是多少？",
    "所有假拟预报的洪量、洪峰和峰现时间是多少？",
    "对比真实预报和假拟预报在洪量、洪峰、峰现时间上的差异",
    "哪个假拟预报的洪峰最接近真实预报？",
]


# ==================== Main Function ====================

async def main():
    """Run the hydrology forecast analysis agent.

    Runs the example queries first, then drops into an interactive loop.

    BUG FIX: agent.invoke() is synchronous and its tool wrappers spin up
    their own event loops via asyncio.run(); calling it directly on this
    coroutine's thread both blocks the running loop and makes the nested
    asyncio.run() raise RuntimeError. Executing it via asyncio.to_thread
    moves it to a worker thread where no loop is running.
    """
    print("="*80)
    print("🌊 水文预报分析 Agent")
    print("="*80)
    print(f"\n使用会话ID: {DEFAULT_SESSION_ID}")
    print(f"使用模型: {VLLM_CONFIG['model_name']}")
    print("\n" + "="*80)
    
    # Create agent
    agent = create_hydrology_agent(DEFAULT_SESSION_ID)
    
    # Run example queries
    for i, query in enumerate(EXAMPLE_QUERIES, 1):
        print(f"\n{'='*80}")
        print(f"示例 {i}: {query}")
        print("="*80)
        
        try:
            # Off-thread so the tools' asyncio.run() has no running loop.
            result = await asyncio.to_thread(agent.invoke, {"input": query})
            print(f"\n✅ 回答:\n{result['output']}")
        except Exception as e:
            print(f"\n❌ 错误: {str(e)}")
        
        print("\n" + "-"*80)
    
    # Interactive mode
    print("\n" + "="*80)
    print("💬 交互模式 (输入 'quit' 退出)")
    print("="*80)
    
    while True:
        try:
            query = input("\n❓ 你的问题: ").strip()
            
            if query.lower() in ['quit', 'exit', 'q']:
                print("\n👋 再见！")
                break
            
            if not query:
                continue
            
            result = await asyncio.to_thread(agent.invoke, {"input": query})
            print(f"\n✅ 回答:\n{result['output']}")
            
        except KeyboardInterrupt:
            print("\n\n👋 再见！")
            break
        except Exception as e:
            print(f"\n❌ 错误: {str(e)}")

# Script entry point: drive the async main() on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())

