"""
使用 LangGraph 构建的高级聊天机器人
支持：
1. Web 搜索
2. 常见问题回答
3. 对话状态保持
4. 复杂查询人工审核
5. 自定义状态控制
6. 对话路径探索
"""

import os
import logging
from typing import Dict, List, Optional, TypedDict, Annotated, Sequence
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_community.llms.tongyi import Tongyi
from langchain_community.tools import TavilySearchResults
from langchain_core.tools import Tool
from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import StateGraph, END, START
from langchain_core.tracers import ConsoleCallbackHandler
from langgraph.graph.message import add_messages

# Configure logging (DEBUG level so each workflow step is traceable).
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Load environment variables from a .env file (expects DASHSCOPE_API_KEY).
load_dotenv()

# State schema shared by every node in the LangGraph workflow.
class AgentState(TypedDict):
    """Chatbot state passed between workflow nodes."""
    # Conversation history; the add_messages reducer merges node updates.
    messages: Annotated[list, add_messages]
    # Name of the next node to route to ("search_web" or "generate_response").
    next: Annotated[str, "下一步操作"]
    # The question currently being processed.
    current_question: Annotated[str, "当前问题"]
    # Raw web-search output, or None if no search was performed.
    search_results: Annotated[Optional[str], "搜索结果"]
    # Set by the response generator when the answer looks uncertain.
    needs_human_review: Annotated[bool, "是否需要人工审核"]
    # The reviewer's override answer, if any.
    human_feedback: Annotated[Optional[str], "人工反馈"]

class AdvancedChatbot:
    """Advanced chatbot built on LangGraph.

    Pipeline: analyze question -> (optional) web search -> generate
    response -> (optional) human review.
    """

    def __init__(self, debug: bool = False):
        """
        Initialize the chatbot.

        Args:
            debug: Enable verbose console callback tracing of LLM calls.

        Raises:
            ValueError: If the DASHSCOPE_API_KEY environment variable is unset.
        """
        self.api_key = os.getenv("DASHSCOPE_API_KEY")
        if not self.api_key:
            raise ValueError("请设置 DASHSCOPE_API_KEY 环境变量")

        # Console callback handler for debugging; None disables tracing.
        self.callbacks = [ConsoleCallbackHandler()] if debug else None

        # Tongyi (Qwen) LLM. Alternative models: qwen-turbo, qwen-max.
        self.llm = Tongyi(
            model_name="qwen-plus",
            temperature=0.7,
            api_key=self.api_key,
            callbacks=self.callbacks
        )

        # Web search tool (Tavily), wrapped as a generic Tool.
        self.search = TavilySearchResults()
        self.tools = [
            Tool(
                name="search",
                func=self.search.run,
                description="用于搜索互联网上的信息。输入应该是一个搜索查询。"
            )
        ]

        # ReAct-style system prompt; placeholders are filled per turn
        # in _generate_response.
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", """你是一个智能助手，可以使用搜索引擎来回答问题。
            当你需要查找信息时，使用搜索工具。
            请用简洁明了的方式回答问题。
            如果搜索结果不够准确，请说明情况。
            
            可用工具:
            {tools}
            
            工具名称: {tool_names}
            
            历史对话:
            {history}
            
            当前问题: {input}
            
            思考过程:
            {agent_scratchpad}
            
            请按照以下格式回答：
            Thought: 思考过程
            Action: 要使用的工具名称
            Action Input: 工具的输入
            Observation: 工具的返回结果
            Thought: 根据结果继续思考
            Final Answer: 最终答案""")
        ])

        # Compiled LangGraph workflow.
        self.workflow = self._create_workflow()

    def _create_workflow(self) -> StateGraph:
        """Build and compile the LangGraph state machine.

        Returns:
            The compiled, runnable workflow graph.
        """
        workflow = StateGraph(AgentState)

        # One node per processing stage.
        workflow.add_node("analyze_question", self._analyze_question)
        workflow.add_node("search_web", self._search_web)
        workflow.add_node("generate_response", self._generate_response)
        workflow.add_node("human_review", self._human_review)

        # Entry edge.
        workflow.add_edge(START, "analyze_question")

        def router(state: AgentState) -> str:
            # Route on the 'next' field set by _analyze_question.
            logger.debug(f"Router: next = {state['next']}")
            return state["next"]

        workflow.add_conditional_edges(
            "analyze_question",
            router,
            {
                "search_web": "search_web",
                "generate_response": "generate_response"
            }
        )

        workflow.add_edge("search_web", "generate_response")
        workflow.add_edge("generate_response", "human_review")
        workflow.add_edge("human_review", END)

        # Compile the graph into a runnable.
        return workflow.compile()

    def _analyze_question(self, state: AgentState) -> AgentState:
        """Decide whether the current question requires a web search.

        Sets state['next'] to 'search_web' or 'generate_response' for
        the conditional router.
        """
        logger.debug(f"Analyzing question: {state['current_question']}")

        # Ask the LLM for a yes/no verdict on whether a search is needed.
        response = self.llm.invoke(
            f"""分析以下问题是否需要搜索互联网来回答：
            {state['current_question']}
            
            只需要回答 'yes' 或 'no'。"""
        )

        logger.debug(f"Analysis response: {response}")

        # Models often answer "Yes." or "yes，..." rather than the bare
        # token, so match on the prefix instead of exact equality.
        if response.lower().strip().startswith('yes'):
            state['next'] = 'search_web'
        else:
            state['next'] = 'generate_response'

        logger.debug(f"Next step: {state['next']}")
        return state

    def _search_web(self, state: AgentState) -> AgentState:
        """Run the web-search tool on the current question and store the result."""
        logger.debug("Searching web...")

        # Execute the search tool and stash the (stringified) results.
        search_results = self.tools[0].run(state['current_question'])
        state['search_results'] = str(search_results)

        logger.debug(f"Search results: {state['search_results'][:200]}...")
        return state

    def _generate_response(self, state: AgentState) -> AgentState:
        """Generate an answer with the LLM, flagging uncertain ones for review."""
        logger.debug("Generating response...")

        # Fill the prompt placeholders for this turn.  'or' (not a .get
        # default) is needed because 'search_results' exists in the state
        # with the value None when no search ran, and a plain .get default
        # would inject the literal string "None" into the prompt.
        input_data = {
            "input": state['current_question'],
            "history": state['messages'],
            "tools": self.tools,
            "tool_names": [tool.name for tool in self.tools],
            "agent_scratchpad": state.get('search_results') or ''
        }

        response = self.llm.invoke(self.prompt.format(**input_data))
        logger.debug(f"LLM response: {response}")

        # Route answers the model itself marks as uncertain to human review.
        if "需要人工审核" in response or "无法确定" in response:
            state['needs_human_review'] = True
            logger.debug("Response needs human review")
        else:
            state['needs_human_review'] = False
            # Extract the final answer from the ReAct-formatted output.
            if "Final Answer:" in response:
                final_answer = response.split("Final Answer:")[-1].strip()
                logger.debug(f"Extracted final answer: {final_answer}")
                state['messages'].append({"role": "assistant", "content": final_answer})
            else:
                logger.debug("No Final Answer found, using full response")
                state['messages'].append({"role": "assistant", "content": response})

        return state

    def _human_review(self, state: AgentState) -> AgentState:
        """Prompt a human reviewer when the generated answer was flagged."""
        logger.debug("Checking if human review is needed...")

        if state['needs_human_review']:
            logger.debug("Human review required")
            print("\n需要人工审核的问题：")
            print(f"问题：{state['current_question']}")
            print(f"AI 回答：{state['messages'][-1]['content'] if state['messages'] else '无'}")
            print("\n请输入您的反馈（直接回车表示同意 AI 的回答）：")
            feedback = input().strip()

            if feedback:
                # The reviewer supplied an override answer; record it and
                # append it so it becomes the most recent assistant message.
                logger.debug("Human provided feedback")
                state['human_feedback'] = feedback
                state['messages'].append({"role": "assistant", "content": feedback})
            else:
                # Reviewer accepted the AI answer, which is already the last
                # message -- appending it again would duplicate it in history.
                logger.debug("Human accepted AI response")
        else:
            logger.debug("No human review needed")

        return state

    def chat(self, user_input: str) -> str:
        """
        Process one user question through the workflow.

        Args:
            user_input: The user's question.

        Returns:
            str: The assistant's answer, or an apology if none was produced.
        """
        logger.debug(f"Processing user input: {user_input}")

        # Fresh per-turn state (no cross-turn persistence here).
        state = {
            "messages": [{"role": "user", "content": user_input}],
            "next": "analyze_question",
            "current_question": user_input,
            "search_results": None,
            "needs_human_review": False,
            "human_feedback": None
        }

        logger.debug("Starting workflow...")

        # Stream events and pull the answer out of the human_review node.
        for event in self.workflow.stream(state):
            logger.debug(f"Workflow event: {event}")
            if "human_review" in event and "messages" in event["human_review"]:
                messages = event["human_review"]["messages"]
                # Walk backwards so the MOST RECENT assistant message wins:
                # human feedback is appended after the AI's own answer and
                # must take precedence over it.
                for message in reversed(messages):
                    if isinstance(message, dict) and message.get("role") == "assistant":
                        logger.debug(f"Found assistant message: {message['content']}")
                        return message["content"]
                    elif isinstance(message, AIMessage):
                        logger.debug(f"Found AIMessage: {message.content}")
                        return message.content

        logger.warning("No assistant message found in workflow events")
        return "抱歉，我无法回答这个问题。"

def main():
    """Run an interactive chat loop until the user types 'quit'."""
    chatbot = AdvancedChatbot(debug=True)

    print("欢迎使用高级聊天机器人！输入 'quit' 退出。")
    print("当前使用的是 qwen-plus 模型，您可以在代码中修改为 qwen-turbo 或 qwen-max")

    running = True
    while running:
        try:
            question = input("\n请输入您的问题: ")
            if question.lower() == 'quit':
                running = False
            else:
                answer = chatbot.chat(question)
                print(f"\nAI 助手: {answer}")
        except Exception as exc:
            # Log with traceback, tell the user, and keep the loop alive.
            logger.error(f"Error in main loop: {str(exc)}", exc_info=True)
            print(f"发生错误: {str(exc)}")

if __name__ == "__main__":
    main() 