#!/usr/bin/env python3
"""
直接调用版水文预报分析工具（不使用 LangChain Agent）

适用于任何 langchain 版本，或者不安装 langchain
"""

import asyncio
import sys
import os

# Add the project root directory to the Python path
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

from src.mcp.forecast_client import ForecastAPIClient
from src.mcp.forecast_analyzer import ForecastAnalyzer


# ==================== Configuration ====================

# Connection settings for the local vLLM OpenAI-compatible endpoint.
# NOTE(review): ask_with_llm overrides temperature/max_tokens with its own
# values; only base_url and model_name are actually read from here.
VLLM_CONFIG = {
    "base_url": "http://localhost:8000/v1",
    "model_name": "Qwen/Qwen2.5-3B-Instruct",
    "temperature": 0.7,
    "max_tokens": 2048,
}

# Forecast session used whenever the caller does not supply one.
DEFAULT_SESSION_ID = "daa0d8a6-dfb3-4887-b798-366f4682c453"

# ==================== Tool functions ====================

async def get_session_info(session_id=DEFAULT_SESSION_ID):
    """Fetch a forecast session and format its metadata as text.

    Args:
        session_id: Forecast session identifier (defaults to DEFAULT_SESSION_ID).

    Returns:
        A Chinese-language summary of the session info and model parameters.
    """
    api = ForecastAPIClient()
    payload = await api.fetch_forecast_data(session_id)
    analyzer = ForecastAnalyzer(payload)

    info = analyzer.get_session_info()
    params = analyzer.get_model_parameters()
    state = params.get('initial_state', {})

    # Assemble the report line by line; the empty entry separates the sections.
    lines = [
        "预报会话信息：",
        f"- 会话ID: {info['session_id']}",
        f"- 站点: {info['station_name']} ({info['station_code']})",
        f"- 模型类型: {info['model_type']}",
        f"- 模型链: {info['model_chain']}",
        f"- 假拟预报数量: {info['num_hypothetical']}",
        "",
        "模型参数：",
        f"- 模型名称: {params.get('model_name')}",
        f"- 初始状态: SA0={state.get('SA0')}, UA0={state.get('UA0')}, YA0={state.get('YA0')}",
    ]
    return "\n".join(lines)


async def analyze_metrics(forecast_type="all", session_id=DEFAULT_SESSION_ID):
    """Compute flood metrics for the requested forecast type.

    Args:
        forecast_type: "real", "hypothetical", or "all" (default) — which
            forecast series to report on.
        session_id: Forecast session identifier.

    Returns:
        Formatted metric blocks joined with newlines, or a fallback message
        when no valid forecast data was found.
    """
    payload = await ForecastAPIClient().fetch_forecast_data(session_id)
    analyzer = ForecastAnalyzer(payload)

    sections = []

    if forecast_type in ("real", "all"):
        m = analyzer.analyze_real_forecast()["metrics"]
        # Analyzer signals failure with an "error" key instead of raising.
        if "error" not in m:
            sections.append(f"""
真实预报指标：
- 洪量: {m['flood_volume_m3']:,.2f} m³
- 洪峰流量: {m['peak_discharge_m3s']:.2f} m³/s
- 峰现时间: {m['peak_time']}
- 平均流量: {m['avg_discharge_m3s']:.2f} m³/s
- 数据点数: {m['num_points']}
- 预报时段: {m['start_time']} 至 {m['end_time']}
""")

    if forecast_type in ("hypothetical", "all"):
        for idx, hyp in enumerate(analyzer.analyze_all_hypothetical()):
            m = hyp["metrics"]
            if "error" in m:
                continue
            sections.append(f"""
假拟预报 #{idx} ({hyp['name']}):
- 洪量: {m['flood_volume_m3']:,.2f} m³
- 洪峰流量: {m['peak_discharge_m3s']:.2f} m³/s
- 峰现时间: {m['peak_time']}
- 平均流量: {m['avg_discharge_m3s']:.2f} m³/s
""")

    if not sections:
        return "未找到有效的预报数据"
    return "\n".join(sections).strip()


async def compare_forecasts(session_id=DEFAULT_SESSION_ID):
    """Compare the real forecast against every hypothetical forecast.

    Args:
        session_id: Forecast session identifier.

    Returns:
        A formatted report with the real-forecast baseline, per-hypothetical
        differences, and (when available) a statistical summary; or an error
        message when the comparison itself failed.
    """
    payload = await ForecastAPIClient().fetch_forecast_data(session_id)
    comparison = ForecastAnalyzer(payload).compare_forecasts()

    if "error" in comparison:
        return f"错误: {comparison['error']}"

    base = comparison["real_forecast"]
    parts = [f"""
真实预报基准值：
- 洪量: {base['flood_volume_m3']:,.2f} m³
- 洪峰流量: {base['peak_discharge_m3s']:.2f} m³/s
- 峰现时间: {base['peak_time']}

假拟预报对比（共 {comparison['num_hypothetical']} 个）：
"""]

    for item in comparison["comparisons"]:
        # A per-item error is reported inline without aborting the loop.
        if "error" in item:
            parts.append(f"\n{item['name']}: {item['error']}")
            continue

        delta = item["differences"]
        parts.append(f"""
{item['name']}:
  洪量差异: {delta['flood_volume']['difference_m3']:+,.2f} m³ ({delta['flood_volume']['difference_percent']:+.2f}%)
  洪峰差异: {delta['peak_discharge']['difference_m3s']:+.2f} m³/s ({delta['peak_discharge']['difference_percent']:+.2f}%)
  峰现时间: {delta['peak_time']['hypothetical']} {'(相同)' if delta['peak_time']['same'] else f'(真实: {delta["peak_time"]["real"]})'}
""")

    # The summary is skipped when the analyzer attached a "note" instead.
    stats = comparison["summary"]
    if "note" not in stats:
        parts.append(f"""
统计摘要：
  洪量差异范围: {stats['volume_diff_range']['min_percent']:.2f}% ~ {stats['volume_diff_range']['max_percent']:.2f}% (平均: {stats['volume_diff_range']['avg_percent']:.2f}%)
  洪峰差异范围: {stats['peak_diff_range']['min_percent']:.2f}% ~ {stats['peak_diff_range']['max_percent']:.2f}% (平均: {stats['peak_diff_range']['avg_percent']:.2f}%)
""")

    return "".join(parts).strip()


# ==================== LLM-assisted answering ====================

async def ask_with_llm(question: str, session_id=DEFAULT_SESSION_ID):
    """Answer a free-form question by routing it to a tool, then summarizing.

    Pipeline: (1) ask the LLM which of the three tools fits the question,
    (2) run that tool, (3) ask the LLM to phrase the tool output as a
    natural-language answer.

    Args:
        question: The user's question in natural language.
        session_id: Forecast session to analyze.

    Returns:
        The LLM's final natural-language answer as a string.
    """
    # vLLM's OpenAI-compatible server does not validate the API key,
    # so any placeholder value works.
    model = ChatOpenAI(
        base_url=VLLM_CONFIG["base_url"],
        model_name=VLLM_CONFIG["model_name"],
        api_key="EMPTY",
        temperature=0.3,  # low temperature for a more deterministic tool choice
        max_tokens=500,
    )

    # Step 1: classify the question into one of the three tools.
    router_prompt = ChatPromptTemplate.from_messages([
        ("system", """你是一个水文预报分析助手。根据用户问题，判断应该调用哪个工具：

工具1: get_session_info - 查询会话基本信息、站点、模型信息
工具2: analyze_metrics - 分析洪量、洪峰、峰现时间等指标
工具3: compare_forecasts - 对比真实预报和假拟预报的差异

请只回复工具名称，不要有其他内容。如果不确定，回复 analyze_metrics。"""),
        ("human", "{question}")
    ])

    routed = await (router_prompt | model).ainvoke({"question": question})
    choice = routed.content.strip().lower()

    # Step 2: dispatch on keywords in the model's reply (order matters:
    # session/info first, then compare, falling back to metrics).
    if any(token in choice for token in ("session", "info")):
        tool_output = await get_session_info(session_id)
    elif any(token in choice for token in ("compare", "对比", "差异")):
        tool_output = await compare_forecasts(session_id)
    else:
        tool_output = await analyze_metrics("all", session_id)

    # Step 3: have the same model turn the raw tool output into an answer.
    summary_prompt = ChatPromptTemplate.from_messages([
        ("system", """你是水文预报分析专家。根据工具返回的数据，用清晰专业的语言回答用户问题。

术语说明：
- SA0: 流域蓄水初值
- UA0: 地下水初值  
- YA0: 河道蓄水初值
- 洪量: 洪水总量（m³）
- 洪峰: 最大流量（m³/s）
- 峰现时间: 洪峰出现时间

请直接回答问题，不要重复工具输出。"""),
        ("human", "用户问题: {question}\n\n工具返回数据:\n{tool_result}\n\n请回答用户问题:")
    ])

    reply = await (summary_prompt | model).ainvoke({
        "question": question,
        "tool_result": tool_output
    })

    return reply.content


# ==================== Example queries ====================

# Canned demo questions run by main() before the interactive loop; each
# exercises a different tool route in ask_with_llm.
EXAMPLE_QUERIES = [
    "这次预报的基本信息是什么？",
    "真实预报的洪量、洪峰和峰现时间分别是多少？",
    "所有假拟预报的洪量、洪峰和峰现时间是多少？",
    "对比真实预报和假拟预报在洪量、洪峰、峰现时间上的差异",
]


# ==================== Main program ====================

async def main():
    """Run the demo queries, then drop into an interactive Q&A loop.

    NOTE(review): input() blocks the event loop; acceptable here because
    nothing else runs concurrently in this script.
    """
    bar = "=" * 80

    print(bar)
    print("🌊 水文预报分析工具（直接调用版）")
    print(bar)
    print(f"\n使用会话ID: {DEFAULT_SESSION_ID}")
    print(f"使用模型: {VLLM_CONFIG['model_name']}")
    print("\n" + bar)

    # Walk through the canned example questions first.
    for num, question in enumerate(EXAMPLE_QUERIES, 1):
        print(f"\n{bar}")
        print(f"示例 {num}: {question}")
        print(bar)

        try:
            reply = await ask_with_llm(question, DEFAULT_SESSION_ID)
            print(f"\n✅ 回答:\n{reply}")
        except Exception as exc:
            # Keep going with the remaining examples; show the full trace.
            print(f"\n❌ 错误: {str(exc)}")
            import traceback
            traceback.print_exc()

        print("\n" + "-" * 80)

    # Then let the user ask their own questions until they quit.
    print("\n" + bar)
    print("💬 交互模式 (输入 'quit' 退出)")
    print(bar)

    while True:
        try:
            question = input("\n❓ 你的问题: ").strip()

            if question.lower() in ('quit', 'exit', 'q'):
                print("\n👋 再见！")
                break

            if not question:
                continue

            reply = await ask_with_llm(question, DEFAULT_SESSION_ID)
            print(f"\n✅ 回答:\n{reply}")

        except KeyboardInterrupt:
            # Ctrl+C exits the interactive loop gracefully.
            print("\n\n👋 再见！")
            break
        except Exception as exc:
            print(f"\n❌ 错误: {str(exc)}")


if __name__ == "__main__":
    asyncio.run(main())

