"""
Agent + LCEL Routing Integration Demo

This script demonstrates the successful integration of LCEL routing chains
into the agent's streaming chat method.
"""

from app.services.chat_service_simple import ChatServiceSimple
from app.services.lcel_routing_service import LCELRoutingService
import asyncio
import time


def demo_routing_architecture():
    """Print an overview of the Agent + LCEL routing architecture.

    Purely informational: writes a fixed description of the components,
    workflow, and routing strategy to stdout. Returns None.
    """
    overview = [
        "🏗️ Agent + LCEL路由架构演示",
        "=" * 50,
        "📋 架构组件:",
        "1. Agent模式 - 意图分析和工具调用",
        "2. LCEL路由链 - 智能回答生成",
        "3. RunnableBranch - 优雅的路由决策",
        "4. 流式输出 - 实时用户体验",
        "\n🔄 工作流程:",
        "1. 🤔 用户输入 → Agent意图分析",
        "2. 🛠️ 调用相应工具 (生肖/系统/计算等)",
        "3. 📊 工具结果整理",
        "4. 🔀 LCEL路由链处理回答生成",
        "5. 💬 流式输出结果",
        "\n🎯 路由策略:",
        "• 有工具结果 → 基于工具结果智能回答",
        "• 无工具结果 → 直接使用LCEL路由链",
        "• 路由失败 → 回退到传统LLM调用",
    ]
    # One write instead of many print calls; joined output is byte-identical.
    print("\n".join(overview))


def demo_lcel_routing_vs_direct_llm():
    """
    Compare LCEL routing with direct LLM calls.

    For each sample query, times the LCEL routing chain and a direct
    ChatOpenAI call, printing a preview of each answer and its latency.
    A backend that fails to initialize is reported once and skipped for
    every query instead of aborting the whole comparison.
    """
    print("\n⚡ LCEL路由 vs 直接LLM调用对比")
    print("=" * 50)

    test_queries = [
        "我1990年出生，属什么生肖？",
        "当前服务器负载情况如何？",
        "计算 123 + 456 等于多少？"
    ]

    lcel_service = LCELRoutingService()

    # Hoist loop-invariant setup: the routing chain and the LLM client are
    # identical for every query, so build each once instead of per iteration.
    try:
        routing_chain = lcel_service.create_routing_branch_lcel()
    except Exception as e:
        routing_chain = None
        print(f"❌ LCEL路由错误: {e}")

    try:
        # Imported lazily so a missing optional dependency only disables
        # the direct-LLM half of the comparison.
        from langchain_openai import ChatOpenAI
        from app.core.config import settings
        llm = ChatOpenAI(
            model="deepseek-chat",
            temperature=0.7,
            api_key=settings.openai_api_key,
            base_url=settings.openai_base_url
        )
    except Exception as e:
        llm = None
        print(f"❌ 直接LLM错误: {e}")

    for query in test_queries:
        print(f"\n🔍 查询: {query}")
        print("-" * 40)

        # Test LCEL routing
        if routing_chain is not None:
            start_time = time.time()
            try:
                lcel_result = routing_chain.invoke(query)
                lcel_time = time.time() - start_time
                print(f"✅ LCEL路由: {lcel_result[:100]}...")
                print(f"   耗时: {lcel_time:.3f}秒")
            except Exception as e:
                print(f"❌ LCEL路由错误: {e}")

        # Test direct LLM call
        if llm is not None:
            start_time = time.time()
            try:
                llm_result = llm.invoke(query)
                llm_time = time.time() - start_time
                print(f"🤖 直接LLM: {llm_result.content[:100]}...")
                print(f"   耗时: {llm_time:.3f}秒")
            except Exception as e:
                print(f"❌ 直接LLM错误: {e}")


async def demo_agent_with_lcel_routing():
    """
    Run the full Agent + LCEL routing demo.

    Streams four scenario queries through ChatServiceSimple's
    agent_chat_stream, collects the chunks, and prints the response
    length, a preview, and whether the LCEL routing chain appears to
    have been used (heuristic: the response mentions "路由链").
    """
    print("\n🚀 Agent + LCEL路由完整演示")
    print("=" * 50)

    service = ChatServiceSimple()

    # Scenarios that exercise the different routing paths.
    scenarios = [
        {
            "name": "生肖查询 (工具 + 路由)",
            "query": "我1995年出生，属什么生肖？",
            "expected_tools": ["get_zodiac_sign"],
            "description": "Agent调用生肖工具，LCEL路由处理回答"
        },
        {
            "name": "系统查询 (工具 + 路由)",
            "query": "服务器性能如何？",
            "expected_tools": ["handle_system_query"],
            "description": "Agent调用系统工具，LCEL路由处理回答"
        },
        {
            "name": "数学计算 (工具 + 路由)",
            "query": "计算 50 * 3 + 20 等于多少？",
            "expected_tools": ["calculate"],
            "description": "Agent调用计算工具，LCEL路由处理回答"
        },
        {
            "name": "一般对话 (直接路由)",
            "query": "什么是深度学习？",
            "expected_tools": [],
            "description": "无工具调用，直接使用LCEL路由链"
        }
    ]

    for idx, case in enumerate(scenarios, 1):
        print(f"\n📝 场景 {idx}: {case['name']}")
        print(f"问题: {case['query']}")
        print(f"描述: {case['description']}")
        print("-" * 50)

        try:
            # Drain the stream into a list, then stitch the full answer.
            chunks = [c async for c in service.agent_chat_stream(case['query'], None, 1)]
            answer = "".join(chunks)

            print(f"✅ 响应长度: {len(answer)} 字符")
            print(f"📊 响应预览: {answer[:150]}...")

            # Heuristic routing-status check based on the response text.
            status = "✅ LCEL路由链已使用" if "路由链" in answer else "⚠️ 可能使用了回退模式"
            print(f"🔀 路由状态: {status}")

        except Exception as err:
            print(f"❌ 错误: {err}")

        print("=" * 60)


def show_integration_benefits():
    """Print the benefits of the Agent + LCEL routing integration.

    Purely informational: writes a fixed, grouped summary to stdout.
    Returns None.
    """
    print("\n🎯 Agent + LCEL路由集成优势")
    print("=" * 50)

    # (header, bullet list) pairs; leading "\n" on later headers keeps the
    # blank-line spacing of the original output.
    benefit_groups = [
        ("✅ 性能优势:", [
            "• 智能路由减少不必要的LLM调用",
            "• 专业领域使用优化处理链",
            "• 流式输出保持用户体验",
        ]),
        ("\n✅ 功能优势:", [
            "• Agent负责意图分析和工具调用",
            "• LCEL路由负责专业回答生成",
            "• 混合模式发挥各自优势",
        ]),
        ("\n✅ 维护优势:", [
            "• LCEL语法简洁易读",
            "• 模块化设计便于扩展",
            "• 完善的错误处理和回退机制",
        ]),
        ("\n✅ 用户体验:", [
            "• 快速响应专业问题",
            "• 流式输出实时反馈",
            "• 准确的工具结果集成",
        ]),
    ]

    for header, bullets in benefit_groups:
        print(header)
        for bullet in bullets:
            print(bullet)


if __name__ == "__main__":
    print("🚀 Agent + LCEL路由集成演示")
    print("=" * 60)

    # Run demos
    demo_routing_architecture()
    demo_lcel_routing_vs_direct_llm()
    asyncio.run(demo_agent_with_lcel_routing())
    show_integration_benefits()

    print("\n✅ 演示完成")
    print("\n🏆 总结:")
    print("• ✅ 成功将LCEL路由链集成到Agent流式聊天中")
    print("• ✅ Agent负责工具调用，LCEL负责回答生成")
    print("• ✅ 保持流式输出体验的同时提升专业处理能力")
    print("• ✅ 完善的错误处理和回退机制")