import json
import aiohttp
import asyncio
from typing import Optional, AsyncGenerator, Dict, List, Any, Callable
from sqlalchemy.orm import Session
from datetime import datetime

from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, BaseMessage
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableParallel, RunnablePassthrough, RunnableConfig
from langchain_openai import ChatOpenAI
from langchain.memory import ConversationTokenBufferMemory

from app.prompt.template import make_prompt, make_template, make_role, make_role_base, make_role_base_english, \
    make_role_base_explain
from app.schemas.chat import ChatJsonPaser
from app.schemas.llm_callback import LlmCallbackHandler
from app.services.chat_memory_service import ChatMemoryService
from app.services.zodiac_service import get_zodiac_sign, get_all_zodiac_signs, get_zodiac_compatibility
from app.services.system_agent import handle_system_query
from app.models.chat import MessageRole, ChatMode
from app.core.database import get_sync_db
from app.core.config import settings



def retrieval(query: str) -> str:
    """Mock retrieval: log the incoming query and return a canned document."""
    print("searching.....", query)
    return "我是王小测"


class ChatServiceSimple:
    def __init__(self, model_name: Optional[str] = None, temperature: float = 0.7):
        """Configure the streaming chat LLM and the memory-service backend.

        Args:
            model_name: model identifier; falls back to settings.openai_model.
            temperature: sampling temperature for the LLM.
        """
        self.model_name = model_name if model_name else settings.openai_model
        self.temperature = temperature
        llm_kwargs = dict(
            model=self.model_name,
            temperature=self.temperature,
            max_tokens=65536,
            api_key=settings.openai_api_key,
            base_url=settings.openai_base_url,
            streaming=True,
        )
        self.llm = ChatOpenAI(**llm_kwargs)
        self.memory_service = ChatMemoryService()


    def chat_no_memory(self, message: str) -> str:
        """Stateless single LLM call; appends two fixed demo messages after the input."""
        conversation = [
            HumanMessage(content=message),
            AIMessage(content="你是一个助手"),
            HumanMessage(content="我的年龄是36岁"),
        ]
        reply = self.llm.invoke(conversation)
        return reply.content

    def chat_with_prompt(self, message: str) -> str:
        """Send the user message together with a system prompt built by make_prompt."""
        system_text = make_prompt(message).to_string()
        reply = self.llm.invoke([HumanMessage(message), SystemMessage(system_text)])
        return reply.content

    def chat_with_template(self, message: str) -> str:
        """Invoke the LLM with a single templated message produced by make_template."""
        templated = make_template(message)
        return self.llm.invoke([templated]).content

    def chat_with_role(self, message: str) -> str:
        """Invoke the LLM with a role prompt and dump debug info to stdout.

        Args:
            message: user input used to build the role prompt.

        Returns:
            str: the model's reply text.
        """
        role = make_role(message)
        res = self.llm.invoke([role])
        # pretty_print() writes to stdout itself and returns None; wrapping it
        # in print() used to emit a spurious extra "None" line.
        res.pretty_print()
        print(res.type)
        print(res.response_metadata)
        return res.content

    async def stream_chat_with_role(self, message: str):
        """Yield streamed chunks from the LLM for a role-based prompt."""
        role_prompt = make_role(message)
        stream = self.llm.astream([role_prompt])
        async for piece in stream:
            yield piece

    def chat_with_batch(self, messages: str) -> list[BaseMessage]:
        """Run one LLM call per comma-separated item in *messages*.

        Args:
            messages: comma-separated user inputs, e.g. "你好,hello".

        Returns:
            list[BaseMessage]: one model reply per input item.
        """
        msgs = messages.split(",")
        # The original wrapped each item as HumanMessage(content=msg).content,
        # which is a no-op round-trip: batch() accepts the plain strings.
        return self.llm.batch(msgs)

    def chat_with_parse(self, message: str) -> str:
        """Invoke the LLM with a role prompt and run the reply through StrOutputParser."""
        reply = self.llm.invoke([make_role(message)])
        return StrOutputParser().parse(reply.content)

    def chat_with_json(self, message: str) -> dict:
        """Ask the LLM to answer in JSON and decode it with JsonOutputParser.

        Args:
            message: user input used to build the role prompt.

        Returns:
            dict: the parsed JSON payload matching the ChatJsonPaser schema.
            (The original annotated the return as str, but parse() yields the
            decoded JSON object.)
        """
        parser = JsonOutputParser(pydantic_object=ChatJsonPaser)
        role = make_role(message)
        # The bare format-instructions string is coerced into a HumanMessage
        # by LangChain's message coercion.
        res = self.llm.invoke([
            role,
            parser.get_format_instructions(),
        ])
        return parser.parse(res.content)

    def chain_chat(self, message: str) -> str | None:
        """Run a translation-style LCEL chain (prompt | llm | parser).

        Returns the parsed reply, or None if anything raises.
        """
        try:
            chain = make_role_base() | self.llm | StrOutputParser()
            payload = {
                "text": message,
                "input_language": "English",
                "output_language": "Chinese",
                "example_input": "I am a good boy",
                "example_output": "我好棒",
            }
            return chain.invoke(payload)
        except Exception as e:
            print(e)
            return None


    def chain_with_parallel(self, message: str) -> str | None:
        """Run the 'english' and 'explain' chains concurrently via RunnableParallel.

        Returns a dict with both branch outputs, or None on failure.
        """
        try:
            to_text = StrOutputParser()
            branches = RunnableParallel(
                english=make_role_base_english() | self.llm | to_text,
                explain=make_role_base_explain() | self.llm | to_text,
            )
            return branches.invoke({"text": message})
        except Exception as e:
            print(e)
            return None

    def chain_with_retrieval(self, query: str) -> str | None:
        """Retrieval-augmented chain: inject retrieval() output as {context}.

        Returns the parsed reply, or None if anything raises.
        """
        template = """请根据用户的问题回答，可以参考对应的上下文进行生成。

            <context>
            {context}
            </context>

            用户的提问是: {query}"""
        try:
            prompt = ChatPromptTemplate.from_template(template)
            with_context = RunnablePassthrough.assign(context=lambda inputs: retrieval(inputs["query"]))
            chain = with_context | prompt | self.llm | StrOutputParser()
            return chain.invoke({"query": query})
        except Exception as e:
            print(e)
            return None

    def chain_with_callback(self, query: str) -> str | None:
        """Retrieval chain with stdout + custom LLM callbacks attached via config.

        Returns the parsed reply, or None if anything raises.
        """
        template = """请根据用户问题回答，可以参考对应的上下文进行生成。

            <context>
            {context}
            </context>

            用户的提问是: {query}"""
        try:
            prompt = ChatPromptTemplate.from_template(template)
            chain = (
                RunnablePassthrough.assign(context=lambda data: retrieval(data["query"]))
                | prompt
                | self.llm
                | StrOutputParser()
            )
            cfg = RunnableConfig(callbacks=[StdOutCallbackHandler(), LlmCallbackHandler()])
            return chain.invoke({"query": query}, config=cfg)
        except Exception as e:
            print(e)
            return None


    def chain_chat_with_memory(self, query: str) -> str | None:
        """Answer *query* through a chain backed by a token-buffer memory.

        NOTE(review): the memory object is created fresh on every call, so no
        history actually survives between calls — confirm whether this is demo
        code or whether the memory should live on the instance.

        Returns:
            str | None: the parsed reply, or None if anything raises.
        """
        try:
            # 1. Compose the prompt; the placeholder expects a list of messages.
            prompt = ChatPromptTemplate.from_messages([
                ("system", "你是OpenAI开发的聊天机器人，请根据对应的上下文回复用户问题"),
                MessagesPlaceholder("history"),  # the "history" variable must be a message list
                ("human", "{query}"),
            ])
            # 2. Create the token-bounded buffer memory (per call — see NOTE above).
            memory = ConversationTokenBufferMemory(return_messages=True,input_key="query", llm=self.llm)

            parser = StrOutputParser()
            # 3. Build the chain, injecting history loaded from memory.
            chain = RunnablePassthrough.assign(history=lambda x: memory.load_memory_variables(x)["history"]) | prompt | self.llm | parser
            # 4. Invoke it.
            res = chain.invoke({"query": query})
            # 5. Persist this turn back into memory (only visible via the print below).
            memory.save_context({"query": query}, {"output": res})
            print(memory.load_memory_variables({}))
            return res
        except Exception as e:
            print(e)
            return None

    async def chain_chat_with_stream(self, query: str, history: Optional[list] = None):
        """Stream a reply, feeding *history* into a MessagesPlaceholder.

        Args:
            query: user input.
            history: prior messages for the placeholder; defaults to empty.

        Yields:
            str: response text chunks.
        """
        # The original used a mutable default (history: list = []), which is
        # shared across calls; None + local initialization avoids that.
        if history is None:
            history = []
        try:
            # 1. Compose the prompt.
            prompt = ChatPromptTemplate.from_messages([
                ("system", "你是OpenAI开发的聊天机器人，请根据对应的上下文回复用户问题"),
                MessagesPlaceholder("history"),  # the "history" variable must be a message list
                ("human", "{query}"),
            ])

            parser = StrOutputParser()
            # 2. Build the chain.
            chain = prompt | self.llm | parser
            # 3. Stream the chunks out.
            async for chunk in chain.astream({"query": query, "history": history}):
                yield chunk
        except Exception as e:
            print(e)
            return
    
    async def chain_chat_with_memory_stream(
        self, 
        query: str, 
        conversation_id: Optional[int] = None,
        user_id: Optional[int] = None,
        db: Optional[Session] = None
    ) -> AsyncGenerator[str, None]:
        """
        Streaming chat backed by database persistence and summary memory.

        Args:
            query: the user's question.
            conversation_id: conversation id; a new conversation is created when None.
            user_id: optional user id.
            db: database session (a fresh sync session is opened when None).

        Yields:
            str: response text chunks — plus one leading SSE-style line carrying
            the new conversation id when a conversation was created.
        """
        if db is None:
            db = next(get_sync_db())
        
        try:
            # 1. Resolve the conversation id.
            if conversation_id is None:
                # Start a new conversation and tell the client its id first.
                conversation = self.memory_service.create_conversation(db, user_id=user_id)
                conversation_id = conversation.id
                yield f"data: {{\"conversation_id\": {conversation_id}}}\n\n"
            else:
                # Make sure the conversation actually exists.
                conversation = self.memory_service.get_conversation(db, conversation_id)
                if not conversation:
                    raise ValueError(f"对话 {conversation_id} 不存在")
            
            # 2. Persist the user's message before generating.
            self.memory_service.save_message(
                db, conversation_id, MessageRole.USER, query
            )
            
            # 3. Load conversation context (summary + recent messages).
            context_messages = await self.memory_service.get_conversation_context(db, conversation_id)
            
            # 4. Build the prompt around that context.
            prompt = ChatPromptTemplate.from_messages([
                ("system", "你是一个智能助手，请根据对话历史和上下文回复用户问题。如果有历史摘要，请参考摘要内容保持对话连贯性。"),
                MessagesPlaceholder("context"),
                ("human", "{query}"),
            ])
            
            parser = StrOutputParser()
            chain = prompt | self.llm | parser
            
            # 5. Stream the reply, accumulating it for later persistence.
            assistant_response = ""
            async for chunk in chain.astream({
                "query": query, 
                "context": context_messages
            }):
                if chunk:
                    assistant_response += chunk
                    # Same plain-chunk output format as agent mode.
                    yield chunk
            
            # 6. Persist the assistant's full reply.
            if assistant_response:
                self.memory_service.save_message(
                    db, conversation_id, MessageRole.ASSISTANT, assistant_response
                )
            
            # 7. Trigger summarization if the conversation has grown enough.
            await self.memory_service.process_and_summarize(db, conversation_id)
            
        except Exception as e:
            error_msg = f"聊天服务错误: {str(e)}"
            print(error_msg)
            yield f"错误: {error_msg}"
        finally:
            if db:
                db.close()
    
    async def get_conversation_history(
        self,
        conversation_id: int,
        limit: Optional[int] = 50,
        db: Optional[Session] = None
    ) -> list[dict]:
        """
        Fetch up to *limit* messages of a conversation as plain dicts.

        Args:
            conversation_id: conversation id.
            limit: maximum number of messages to return.
            db: database session (a fresh sync session is opened when None).

        Returns:
            list[dict]: message records; empty list on error. The session is
            always closed before returning.
        """
        if db is None:
            db = next(get_sync_db())
        try:
            rows = self.memory_service.get_conversation_messages(db, conversation_id, limit)
            history = []
            for row in rows:
                history.append({
                    "id": row.id,
                    "role": row.role.value,
                    "content": row.content,
                    "sequence_number": row.sequence_number,
                    "created_at": row.created_at.isoformat(),
                })
            return history
        except Exception as e:
            print(f"获取对话历史错误: {str(e)}")
            return []
        finally:
            if db:
                db.close()
    
    async def get_conversation_summaries(
        self,
        conversation_id: int,
        db: Optional[Session] = None
    ) -> list[dict]:
        """
        Return the active summaries of a conversation as plain dicts.

        Args:
            conversation_id: conversation id.
            db: database session (a fresh sync session is opened when None).

        Returns:
            list[dict]: summary records; empty list on error. The session is
            always closed before returning.
        """
        if db is None:
            db = next(get_sync_db())
        try:
            records = self.memory_service.get_active_summaries(db, conversation_id)
            payload = []
            for rec in records:
                payload.append({
                    "id": rec.id,
                    "conversation_id": rec.conversation_id,
                    "summary_content": rec.summary_content,
                    "start_message_seq": rec.start_message_seq,
                    "end_message_seq": rec.end_message_seq,
                    "summary_type": rec.summary_type,
                    "created_at": rec.created_at.isoformat(),
                })
            return payload
        except Exception as e:
            print(f"获取对话摘要错误: {str(e)}")
            return []
        finally:
            if db:
                db.close()
    

    def _register_tools(self) -> Dict[str, Callable]:
        """
        Build the name -> callable registry of tools available to the agent.

        Returns:
            Dict[str, Callable]: mapping from tool name to bound method.
        """
        return {
            "web_search": self._web_search,
            "get_current_time": self._get_current_time,
            "calculate": self._calculate,
            "get_weather": self._get_weather,
            "get_conversation_context": self._get_conversation_context,
            "get_zodiac_sign": self._get_zodiac_sign,
            "get_all_zodiac_signs": self._get_all_zodiac_signs,
            "get_zodiac_compatibility": self._get_zodiac_compatibility,
            "handle_system_query": self._handle_system_query,
        }
    
    async def _web_search(self, query: str, num_results: int = 3) -> str:
        """
        Query the DuckDuckGo Instant Answer API and format the hits.

        Args:
            query: search terms.
            num_results: maximum number of related-topic entries to include.

        Returns:
            str: formatted results, or an error / empty-result message.
        """
        search_url = f"https://api.duckduckgo.com/?q={query}&format=json&no_html=1&skip_disambig=1"
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(search_url) as response:
                    if response.status != 200:
                        return f"搜索请求失败，状态码: {response.status}"
                    data = await response.json()

            parts = []
            # Instant answer, if present.
            if data.get('Answer'):
                parts.append(f"即时答案: {data['Answer']}")
            # Abstract/summary, if present.
            if data.get('Abstract'):
                parts.append(f"摘要: {data['Abstract']}")
            # Up to num_results related topics that carry text.
            for i, topic in enumerate((data.get('RelatedTopics') or [])[:num_results]):
                if isinstance(topic, dict) and topic.get('Text'):
                    parts.append(f"相关信息{i+1}: {topic['Text']}")

            if parts:
                return "\n\n".join(parts)
            return f"未找到关于'{query}'的相关信息"
        except Exception as e:
            return f"搜索过程中发生错误: {str(e)}"
    
    def _get_current_time(self) -> str:
        """
        Return the current local time as a Chinese-formatted string.

        Returns:
            str: e.g. "2024年01月02日 13:45:00".
        """
        now = datetime.now()
        return now.strftime("%Y年%m月%d日 %H:%M:%S")
    
    def _calculate(self, expression: str) -> str:
        """
        Safely evaluate a basic arithmetic expression.

        Args:
            expression: expression using digits, + - * / ( ) . and spaces.

        Returns:
            str: "<expression> = <result>" on success, otherwise an error message.
        """
        import ast
        import operator

        allowed_chars = set('0123456789+-*/().= ')
        if not all(c in allowed_chars for c in expression):
            return "表达式包含不安全的字符"

        # Evaluate via the AST instead of eval(): the character whitelist alone
        # still admitted e.g. "9**9**9" (two '*' characters), which eval()
        # would happily run — a denial-of-service vector on untrusted input.
        ops = {
            ast.Add: operator.add,
            ast.Sub: operator.sub,
            ast.Mult: operator.mul,
            ast.Div: operator.truediv,
            ast.USub: operator.neg,
            ast.UAdd: operator.pos,
        }

        def _eval(node):
            # Recursively evaluate only numeric literals and the operators above.
            if isinstance(node, ast.Expression):
                return _eval(node.body)
            if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
                return node.value
            if isinstance(node, ast.BinOp) and type(node.op) in ops:
                return ops[type(node.op)](_eval(node.left), _eval(node.right))
            if isinstance(node, ast.UnaryOp) and type(node.op) in ops:
                return ops[type(node.op)](_eval(node.operand))
            raise ValueError("不支持的表达式")

        try:
            result = _eval(ast.parse(expression, mode='eval'))
            return f"{expression} = {result}"
        except Exception as e:
            return f"计算错误: {str(e)}"
    
    async def _get_weather(self, city: str) -> str:
        """
        Return canned weather info for a few cities (mock implementation).

        Args:
            city: city name in Chinese.

        Returns:
            str: weather description, or a fallback note for unknown cities.
        """
        # Mock data; swap in a real weather API for production use.
        known = {
            "北京": "晴天，温度15-25°C，微风",
            "上海": "多云，温度18-28°C，东南风",
            "广州": "小雨，温度20-30°C，南风",
            "深圳": "阴天，温度22-32°C，无风",
        }
        if city in known:
            return known[city]
        return f"暂无{city}的天气信息"
    
    async def _get_conversation_context(self, conversation_id: int) -> str:
        """
        Summarize the last few messages of a conversation for tool use.

        Args:
            conversation_id: conversation id.

        Returns:
            str: a short textual recap, or an error / empty-history notice.
        """
        try:
            recent = await self.get_conversation_history(conversation_id, limit=5)
            if not recent:
                return "暂无对话历史"

            lines = ["最近的对话内容:"]
            for msg in recent[-3:]:  # only the most recent 3 entries
                speaker = "用户" if msg['role'] == 'user' else "助手"
                body = msg['content']
                if len(body) > 100:
                    body = body[:100] + "..."
                lines.append(f"{speaker}: {body}")
            return "\n".join(lines) + "\n"
        except Exception as e:
            return f"获取对话上下文失败: {str(e)}"

    def _get_zodiac_sign(self, year: int, month: int = None, day: int = None) -> str:
        """
        获取生肖信息工具

        Args:
            year: 公历年份
            month: 公历月份 (1-12), 可选
            day: 公历日期 (1-31), 可选

        Returns:
            str: 生肖信息
        """
        try:
            result = get_zodiac_sign(year, month, day)
            if "error" in result:
                return result["error"]

            # 格式化输出
            input_info = f"{result['input']['year']}年"
            if result['input']['month']:
                input_info += f"{result['input']['month']}月"
            if result['input']['day']:
                input_info += f"{result['input']['day']}日"

            zodiac_info = f"""
📅 {input_info} 的生肖信息:
🐉 生肖: {result['zodiac_sign']} ({result['zodiac_english']})
🐲 生肖年: {result['zodiac_year']}年
🧧 春节: {result['chinese_new_year']}
⭐ 特质: {result['attributes']['traits']}
🎰 幸运数字: {', '.join(map(str, result['attributes']['lucky_numbers']))}
🎨 幸运颜色: {', '.join(result['attributes']['lucky_colors'])}
            """.strip()

            return zodiac_info
        except Exception as e:
            return f"查询生肖信息时出错: {str(e)}"

    def _get_all_zodiac_signs(self) -> str:
        """
        获取所有生肖信息工具

        Returns:
            str: 所有生肖信息
        """
        try:
            all_zodiacs = get_all_zodiac_signs()
            result = "🐲 十二生肖信息 🐲\n\n"

            for zodiac in all_zodiacs:
                result += f"""
{zodiac['sign']} ({zodiac['english']}):
📅 代表年份: {', '.join(map(str, zodiac['attributes']['years'][:3]))}...
⭐ 特质: {zodiac['attributes']['traits']}
🎰 幸运数字: {', '.join(map(str, zodiac['attributes']['lucky_numbers']))}
🎨 幸运颜色: {', '.join(zodiac['attributes']['lucky_colors'])}
                """.strip() + "\n\n"

            return result.strip()
        except Exception as e:
            return f"查询生肖信息时出错: {str(e)}"

    def _get_zodiac_compatibility(self, zodiac1: str, zodiac2: str) -> str:
        """
        获取生肖配对工具

        Args:
            zodiac1: 第一个生肖
            zodiac2: 第二个生肖

        Returns:
            str: 生肖配对信息
        """
        try:
            result = get_zodiac_compatibility(zodiac1, zodiac2)
            if "error" in result:
                return result["error"]

            compatibility_info = f"""
💕 {zodiac1} 与 {zodiac2} 的配对分析:
🐉 {zodiac1} 对 {zodiac2}: {result['compatibility']}
🐲 {zodiac2} 对 {zodiac1}: {result['reverse_compatibility']}
💖 整体配对: {result['overall']}
            """.strip()

            return compatibility_info
        except Exception as e:
            return f"查询生肖配对时出错: {str(e)}"

    def _handle_system_query(self, query: str) -> str:
        """
        Delegate a system-related query to the system agent service.

        Args:
            query: the user's system-related question.

        Returns:
            str: the service's answer, or an error message.
        """
        try:
            response = handle_system_query(query)
        except Exception as e:
            return f"处理系统查询时出错: {str(e)}"
        return response
    
    async def _analyze_intent(self, query: str) -> Dict[str, Any]:
        """
        Classify the user's query to decide whether tools are needed.

        Args:
            query: the user's input.

        Returns:
            Dict: {"need_tools", "recommended_tools", "tool_params", "reasoning"}.
            Falls back to a "no tools" result when the LLM call fails or the
            model's reply is not valid JSON.
        """
        # Intent-analysis prompt. The literal JSON braces below are safe only
        # because SystemMessage content is NOT treated as a template string.
        intent_prompt = ChatPromptTemplate.from_messages([
            SystemMessage(content="""
你是一个智能意图分析助手。分析用户的查询，判断是否需要使用工具来获取信息。

可用工具:
1. web_search - 网络搜索，用于获取实时信息、新闻、知识等
2. get_current_time - 获取当前时间
3. calculate - 数学计算
4. get_weather - 获取天气信息
5. get_conversation_context - 获取对话上下文
6. get_zodiac_sign - 获取生肖信息，输入年份和月份
7. get_all_zodiac_signs - 获取所有生肖信息
8. get_zodiac_compatibility - 获取生肖配对信息
9. handle_system_query - 处理系统查询，如会话统计、服务器负载等

请分析用户查询，返回JSON格式的结果:
{
    "need_tools": true/false,
    "recommended_tools": ["tool_name1", "tool_name2"],
    "tool_params": {"tool_name": {"param1": "value1"}},
    "reasoning": "分析原因"
}

如果用户询问的是常识性问题或者你可以直接回答的问题，则不需要使用工具。
只有当需要获取实时信息、进行计算、查询天气、查询生肖信息、查询系统状态等特定操作时才使用工具。
            """),
            HumanMessage(content=f"用户查询: {query}")
        ])
        
        try:
            # A separate low-temperature instance for deterministic classification.
            llm = ChatOpenAI(
                model=self.model_name,
                temperature=0.1,
                api_key=settings.openai_api_key,
                base_url=settings.openai_base_url
            )
            
            response = await llm.ainvoke(intent_prompt.format_messages())
            
            # Try to decode the JSON the model was instructed to emit.
            try:
                intent_result = json.loads(response.content)
                return intent_result
            except json.JSONDecodeError:
                # Unparseable reply: answer without tools rather than failing.
                return {
                    "need_tools": False,
                    "recommended_tools": [],
                    "tool_params": {},
                    "reasoning": "意图分析解析失败，使用直接回答"
                }
                
        except Exception as e:
            print(f"意图分析错误: {str(e)}")
            return {
                "need_tools": False,
                "recommended_tools": [],
                "tool_params": {},
                "reasoning": f"意图分析失败: {str(e)}"
            }
    
    async def _execute_tools(self, tools_to_use: List[str], tool_params: Dict[str, Dict]) -> Dict[str, str]:
        """
        Run the requested tools and collect their outputs.

        Args:
            tools_to_use: names of tools to run.
            tool_params: per-tool keyword arguments.

        Returns:
            Dict[str, str]: tool name -> result (or error message).
        """
        registry = self._register_tools()
        outcomes: Dict[str, str] = {}
        for name in tools_to_use:
            func = registry.get(name)
            if func is None:
                outcomes[name] = f"未知工具: {name}"
                continue
            kwargs = tool_params.get(name, {})
            try:
                # Async tools must be awaited; sync tools are called directly.
                if asyncio.iscoroutinefunction(func):
                    outcomes[name] = await func(**kwargs)
                else:
                    outcomes[name] = func(**kwargs)
            except Exception as e:
                outcomes[name] = f"工具执行错误: {str(e)}"
        return outcomes
    
    async def agent_chat(self, query: str, conversation_id: int, user_id: int) -> str:
        """
        Agent chat entry point with tool calling and web access.

        Flow: analyze intent -> optionally run tools -> load conversation
        context -> ask the LLM -> best-effort persistence of both messages.

        Args:
            query: user input.
            conversation_id: conversation id used for history and persistence.
            user_id: user id.

        Returns:
            str: the agent's reply, or an apology string on failure.
        """
        try:
            # 1. Analyze the user's intent.
            intent_analysis = await self._analyze_intent(query)
            
            tool_results = {}
            
            # 2. Execute tools if the intent analysis asked for them.
            if intent_analysis.get("need_tools", False):
                tools_to_use = intent_analysis.get("recommended_tools", [])
                tool_params = intent_analysis.get("tool_params", {})
                
                if tools_to_use:
                    print(f"正在使用工具: {', '.join(tools_to_use)}")
                    tool_results = await self._execute_tools(tools_to_use, tool_params)
            
            # 3. Load conversation history (recent messages plus summaries).
            #    Best-effort: failures are logged and we continue without history.
            conversation_history = []
            try:
                from app.core.database import get_sync_db
                db = next(get_sync_db())
                memory_service = ChatMemoryService()
                
                conversation_history = await memory_service.get_conversation_context(
                    db=db,
                    conversation_id=conversation_id
                )
                
                db.close()
            except Exception as e:
                print(f"获取对话历史失败: {str(e)}")
            
            # 4. Build the final answering prompt.
            system_message = """
你是一个智能助手，能够使用各种工具来帮助用户解答问题。

请根据用户的问题、对话历史和工具执行结果（如果有），给出准确、有用的回答。

注意事项:
1. 如果使用了工具获取信息，请基于工具结果回答
2. 如果工具结果不完整或有错误，请说明并尽力回答
3. 保持回答的自然性和友好性
4. 如果没有使用工具，请基于你的知识和对话历史回答
5. 注意对话的连贯性，参考之前的对话内容
            """
            
            # Assemble the message list: system prompt first...
            messages = [SystemMessage(content=system_message)]
            
            # ...then conversation history...
            messages.extend(conversation_history)
            
            # ...then the tool results (if any) as a human message...
            if tool_results:
                tool_info = "工具执行结果:\n"
                for tool_name, result in tool_results.items():
                    tool_info += f"{tool_name}: {result}\n"
                messages.append(HumanMessage(content=tool_info))
            
            # ...and finally the user's question.
            messages.append(HumanMessage(content=query))
            
            # 5. Generate the final answer with a fresh (non-streaming) LLM instance.
            llm = ChatOpenAI(
                model=self.model_name,
                temperature=0.7,
                api_key=settings.openai_api_key,
                base_url=settings.openai_base_url
            )
            
            response = await llm.ainvoke(messages)
            
            # 6. Best-effort persistence of both sides of the exchange.
            try:
                from app.core.database import get_sync_db
                db = next(get_sync_db())
                memory_service = ChatMemoryService()
                memory_service.save_message(
                    db=db,
                    conversation_id=conversation_id,
                    role=MessageRole.USER,
                    content=query
                )
                memory_service.save_message(
                    db=db,
                    conversation_id=conversation_id,
                    role=MessageRole.ASSISTANT,
                    content=response.content
                )
                db.close()
            except Exception as e:
                print(f"保存对话记录失败: {str(e)}")
            
            return response.content
            
        except Exception as e:
            error_msg = f"智能体处理错误: {str(e)}"
            print(error_msg)
            return f"抱歉，处理您的请求时遇到了问题: {str(e)}"
    
    async def agent_chat_stream(self, query: str, conversation_id: Optional[int], user_id: int) -> AsyncGenerator[str, None]:
        """
        智能体流式聊天方法 - 具备工具调用和网络访问能力
        
        Args:
            query: 用户输入
            conversation_id: 对话ID（可选，如果为None则创建新对话）
            user_id: 用户ID
            
        Yields:
            str: 流式输出的文本片段
        """
        try:
            # 0. 确保有有效的conversation_id
            if conversation_id is None:
                from app.core.database import get_sync_db
                db = next(get_sync_db())
                memory_service = ChatMemoryService()
                
                # 尝试获取用户最新的agent模式对话
                existing_conversation = memory_service.get_latest_conversation_by_mode(
                    db, user_id, ChatMode.AGENT
                )
                
                if existing_conversation:
                    conversation_id = existing_conversation.id
                else:
                    # 如果没有现有的agent对话，创建新的
                    conversation = memory_service.create_conversation(
                        db, 
                        title="智能体对话",
                        user_id=user_id, 
                        chat_mode=ChatMode.AGENT
                    )
                    conversation_id = conversation.id
                db.close()
            
            # 1. 先输出状态信息
            yield "🤔 正在分析您的问题...\n\n"
            
            # 2. 分析用户意图
            intent_analysis = await self._analyze_intent(query)
            
            tool_results = {}
            
            # 3. 如果需要使用工具，则执行工具
            if intent_analysis.get("need_tools", False):
                tools_to_use = intent_analysis.get("recommended_tools", [])
                tool_params = intent_analysis.get("tool_params", {})
                
                if tools_to_use:
                    yield f"🛠️ 正在使用工具: {', '.join(tools_to_use)}\n\n"
                    tool_results = await self._execute_tools(tools_to_use, tool_params)
                    
                    # 输出工具执行结果
                    if tool_results:
                        yield "📊 工具执行完成，正在整理结果...\n\n"
            
            # 4. 获取对话历史
            conversation_history = []
            try:
                from app.core.database import get_sync_db
                db = next(get_sync_db())
                memory_service = ChatMemoryService()
                
                # 获取对话上下文（包括历史消息和摘要）
                conversation_history = await memory_service.get_conversation_context(
                    db=db,
                    conversation_id=conversation_id
                )
                
                db.close()
            except Exception as e:
                print(f"获取对话历史失败: {str(e)}")
            
            # 5. 构建最终的回答提示
            system_message = """
你是一个智能助手，能够使用各种工具来帮助用户解答问题。

请根据用户的问题、对话历史和工具执行结果（如果有），给出准确、有用的回答。

注意事项:
1. 如果使用了工具获取信息，请基于工具结果回答
2. 如果工具结果不完整或有错误，请说明并尽力回答
3. 保持回答的自然性和友好性
4. 如果没有使用工具，请基于你的知识和对话历史回答
5. 注意对话的连贯性，参考之前的对话内容
            """
            
            # 构建消息
            messages = [SystemMessage(content=system_message)]
            
            # 添加对话历史
            messages.extend(conversation_history)
            
            # 添加工具结果（如果有）
            if tool_results:
                tool_info = "工具执行结果:\n"
                for tool_name, result in tool_results.items():
                    tool_info += f"{tool_name}: {result}\n"
                messages.append(HumanMessage(content=tool_info))
            
            # 添加用户问题
            messages.append(HumanMessage(content=query))
            
          # 6. 使用LCEL路由链替代直接LLM调用进行回答生成
            yield "💭 正在使用路由链生成回答...\n\n"

            # 使用LCEL路由链替代直接LLM调用
            from app.services.lcel_routing_service import LCELRoutingService
            from langchain_core.runnables import RunnableLambda

            # 创建LCEL路由服务
            lcel_service = LCELRoutingService(
                model_name=self.model_name,
                temperature=0.7
            )

            # 创建智能路由处理器，结合工具结果和对话历史
            def smart_routing_processor(context: Dict[str, Any]) -> str:
                """智能路由处理器，结合工具结果和上下文"""
                user_query = context.get("query", "")
                tool_results = context.get("tool_results", {})
                conversation_history = context.get("conversation_history", [])

                # 根据工具结果和查询内容决定路由策略
                if tool_results:
                    # 如果有工具结果，优先使用工具结果回答，并包含对话历史
                    history_text = ""
                    if conversation_history:
                        history_text = "\n\n对话历史：\n"
                        for msg in conversation_history[-4:]:  # 只使用最近4条对话作为上下文
                            role = "用户" if msg.get("role") == "user" else "助手"
                            history_text += f"{role}: {msg.get('content', '')}\n"

                    if "get_zodiac_sign" in tool_results:
                        return f"基于工具结果回答用户的生肖问题: {user_query}\n\n对话历史: {history_text}\n\n工具结果: {tool_results['get_zodiac_sign']}"
                    elif "handle_system_query" in tool_results:
                        return f"基于工具结果回答用户的系统问题: {user_query}\n\n对话历史: {history_text}\n\n工具结果: {tool_results['handle_system_query']}"
                    elif "calculate" in tool_results:
                        return f"基于工具结果回答用户的数学问题: {user_query}\n\n对话历史: {history_text}\n\n工具结果: {tool_results['calculate']}"
                    else:
                        return f"基于工具结果回答用户问题: {user_query}\n\n对话历史: {history_text}\n\n工具结果: {str(tool_results)}"
                else:
                    # 没有工具结果，使用支持对话历史的路由链处理
                    try:
                        if conversation_history:
                            # 使用记忆感知的路由链
                            routing_chain = lcel_service.create_memory_aware_routing_branch_lcel()

                            # 格式化对话历史
                            formatted_history = lcel_service._format_conversation_history(conversation_history)

                            # 调用路由链，传入查询和对话历史
                            result = routing_chain.invoke({
                                "query": user_query,
                                "conversation_history": formatted_history
                            })
                            return result
                        else:
                            # 如果没有对话历史，使用标准路由链
                            routing_chain = lcel_service.create_routing_branch_lcel()
                            return routing_chain.invoke(user_query)
                    except Exception as e:
                        logger.error(f"记忆感知路由链处理失败: {e}")
                        # 回退到标准路由链
                        routing_chain = lcel_service.create_routing_branch_lcel()
                        return routing_chain.invoke(user_query)

            # 创建LCEL兼容的智能路由链
            smart_routing_chain = RunnableLambda(smart_routing_processor)

            # 构建上下文
            context = {
                "query": query,
                "tool_results": tool_results if tool_results else None,
                "conversation_history": conversation_history
            }

            # 执行智能路由链并模拟流式输出
            try:
                # 先使用智能路由链获取完整结果
                full_result = smart_routing_chain.invoke(context)

                # 然后模拟流式输出
                words = full_result.split()
                current_chunk = ""
                delay = 0.02  # 更小的延迟，更流畅的流式体验

                for word in words:
                    current_chunk += word + " "
                    if len(current_chunk) > 12 or word == words[-1]:  # 每12个字符或最后一个词输出一次
                        yield current_chunk
                        current_chunk = ""
                        await asyncio.sleep(delay)

                full_response = full_result

            except Exception:
                # 如果路由链失败，回退到传统LLM调用
                yield "⚠️ 路由链处理失败，回退到传统模式...\n\n"

                llm = ChatOpenAI(
                    model=self.model_name,
                    temperature=0.7,
                    api_key=settings.openai_api_key,
                    base_url=settings.openai_base_url,
                    streaming=True
                )

                full_response = ""
                async for chunk in llm.astream(messages):
                    if chunk.content:
                        full_response += chunk.content
                        yield chunk.content
            
            # 7. 保存对话记录
            try:
                from app.core.database import get_sync_db
                db = next(get_sync_db())
                memory_service = ChatMemoryService()
                memory_service.save_message(
                    db=db,
                    conversation_id=conversation_id,
                    role=MessageRole.USER,
                    content=query
                )
                memory_service.save_message(
                    db=db,
                    conversation_id=conversation_id,
                    role=MessageRole.ASSISTANT,
                    content=full_response
                )
                db.close()
            except Exception as e:
                print(f"保存对话记录失败: {str(e)}")
            
        except Exception as e:
            error_msg = f"智能体处理错误: {str(e)}"
            print(error_msg)
            yield f"\n\n❌ 抱歉，处理您的请求时遇到了问题: {str(e)}"

    def routing_chat(self, query: str) -> str:
        """
        Routing-chain chat mode: delegate the query to a semantic routing
        chain that picks the appropriate handler automatically.

        Args:
            query: The user's input text.

        Returns:
            str: The routed chain's answer, or a friendly error message if
            routing fails.
        """
        try:
            from app.services.routing_chain_service import RoutingChainService

            # Configure the routing service with the same model settings
            # as this chat service instance.
            service = RoutingChainService(
                model_name=self.model_name,
                temperature=self.temperature,
            )

            # Semantic routing: fast and reliable handler selection.
            chain = service.create_semantic_routing_chain()
            return chain(query)

        except Exception as e:
            print(f"路由链处理错误: {str(e)}")
            return f"抱歉，处理您的请求时遇到了问题: {str(e)}"

    async def routing_chat_stream(self, query: str) -> AsyncGenerator[str, None]:
        """
        Streaming variant of the routing-chain chat mode.

        The underlying semantic routing chain is synchronous, so streaming is
        simulated: the finished answer is yielded in small fixed-size chunks
        with a short delay between them.

        Args:
            query: The user's input text.

        Yields:
            str: Progress notices, then chunks of the routed answer (or an
            error message if routing fails).
        """
        try:
            from app.services.routing_chain_service import RoutingChainService

            # First, announce that the question type is being analyzed.
            yield "🔍 正在分析问题类型...\n\n"

            routing_service = RoutingChainService(
                model_name=self.model_name,
                temperature=self.temperature
            )

            # Semantic routing selects the handler chain.
            semantic_chain = routing_service.create_semantic_routing_chain()

            yield "🚀 已选择处理链，正在生成回答...\n\n"

            # The chain is synchronous; get the complete answer up front.
            result = semantic_chain(query)

            # Simulate streaming by slicing the result directly. The old
            # implementation re-joined `result.split()` with single spaces,
            # which collapsed newlines and repeated whitespace and mangled
            # formatted answers; it also flushed on `word == words[-1]`,
            # which fired early whenever the final word occurred more than
            # once. Fixed-size slices preserve the text byte-for-byte.
            chunk_size = 20
            for start in range(0, len(result), chunk_size):
                yield result[start:start + chunk_size]
                await asyncio.sleep(0.05)  # small delay for streaming effect

        except Exception as e:
            error_msg = f"路由链流式处理错误: {str(e)}"
            print(error_msg)
            yield f"\n\n❌ 抱歉，处理您的请求时遇到了问题: {str(e)}"
    
            