import operator
import os
from dataclasses import dataclass, field
from typing import Annotated, Any, Dict, List

from langchain_openai import ChatOpenAI
from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages

# Configure the OpenAI API key.
# BUG FIX: use setdefault so a real key already exported in the user's
# environment is not clobbered by this placeholder value.
os.environ.setdefault("OPENAI_API_KEY", "your-api-key-here")

# 定义状态结构
@dataclass
class ConversationState:
    """Shared conversation state carried between graph nodes.

    Attributes:
        messages: Chat history; the ``add_messages`` reducer tells LangGraph
            to append node-returned messages instead of overwriting the list.
        user_query: Raw question text the user submitted.
        current_step: Label of the conversation stage most recently run.
        requires_human_intervention: Set when the query must be escalated.
        resolution_summary: Short wrap-up text produced at the end.
    """

    # NOTE: a dataclass (rather than a hand-rolled __init__(**kwargs)) is a
    # state schema LangGraph's StateGraph understands natively, and
    # default_factory avoids sharing one mutable list across instances.
    # The original also silently ignored unknown keyword arguments; the
    # dataclass now rejects them, surfacing caller typos.
    messages: Annotated[list, add_messages] = field(default_factory=list)
    user_query: str = ""
    current_step: str = "start"
    requires_human_intervention: bool = False
    resolution_summary: str = ""

# Initialize the chat model shared by all agent nodes below.
llm = ChatOpenAI(model="gpt-3.5-turbo")

class CustomerServiceAgent:
    """General customer-service agent node.

    Asks the LLM to answer the user's question with the full message
    history, then scans the reply for escalation keywords to decide
    whether a human needs to take over.
    """

    def __call__(self, state: ConversationState) -> Dict[str, Any]:
        # BUG FIX: the original also imported HumanMessage here but never
        # used it; only AIMessage is needed.
        from langchain_core.messages import AIMessage

        # Extra context appended to the system prompt so the model knows
        # where in the flow it is.
        context = f"""
        当前对话阶段: {state.current_step}
        用户问题: {state.user_query}
        """

        system_prompt = """你是一个专业的客服助手。请根据用户的问题提供准确、友好的帮助。
        如果问题涉及技术故障，请提供详细的解决步骤。
        如果问题需要人工干预(如退款、账户问题)，请明确说明。
        保持专业且友好的语气。"""

        response = llm.invoke([
            {"role": "system", "content": system_prompt + context},
            *state.messages
        ])

        # Keyword heuristic: if the model's own answer mentions escalation
        # terms, flag the conversation for human intervention downstream.
        response_text = response.content.lower()
        requires_human = any(word in response_text for word in
                             ['人工', '转接', '专员', '无法解决', '复杂'])

        return {
            "messages": [AIMessage(content=response.content)],
            "requires_human_intervention": requires_human,
            "current_step": "customer_service_response",
        }

class TechnicalSupportAgent:
    """Technical-support agent node for troubleshooting questions."""

    def __call__(self, state: ConversationState) -> Dict[str, Any]:
        from langchain_core.messages import AIMessage

        prompt = """你是技术支持专家。专门解决技术问题，如软件故障、连接问题、错误代码等。
        提供详细的故障排除步骤，使用编号列表清晰说明。
        如果问题超出技术范围，建议联系相关团队。"""

        # Prepend the specialist system prompt to the running history.
        conversation = [{"role": "system", "content": prompt}]
        conversation.extend(state.messages)
        reply = llm.invoke(conversation)

        return {
            "messages": [AIMessage(content=reply.content)],
            "current_step": "technical_support_response",
        }

class HumanInterventionRouter:
    """Node that announces whether the conversation is being escalated."""

    def __call__(self, state: ConversationState) -> Dict[str, Any]:
        from langchain_core.messages import AIMessage

        escalate = state.requires_human_intervention
        # Choose the announcement and the step label from the same flag.
        announcement = (
            "我已将您的问题转接给人工客服，请稍等片刻，专员将很快为您服务。"
            if escalate
            else "问题已解决！还有其他需要帮助的吗？"
        )
        step = "human_agent" if escalate else "end_conversation"

        return {
            "messages": [AIMessage(content=announcement)],
            "current_step": step,
        }

class HumanAgent:
    """Node simulating a human support representative taking over."""

    def __call__(self, state: ConversationState) -> Dict[str, Any]:
        from langchain_core.messages import AIMessage

        # Canned hand-off reply standing in for a real human response.
        greeting = "您好，我是人工客服专员。我已经了解您的情况，将为您提供专门的解决方案。"
        return {
            "messages": [AIMessage(content=greeting)],
            "resolution_summary": "问题已由人工客服接手处理",
            "current_step": "conversation_end",
        }

class ConversationRouter:
    """Conditional-edge function: pick the next node from the user's query."""

    # Keyword groups that drive the routing decision.
    _TECH_KEYWORDS = ('技术', '故障', '错误', '无法', '连接')
    _HUMAN_KEYWORDS = ('退款', '投诉', '账户', '人工')

    def __call__(self, state: ConversationState) -> str:
        query = state.user_query.lower()

        if any(kw in query for kw in self._TECH_KEYWORDS):
            return "technical_support"
        if any(kw in query for kw in self._HUMAN_KEYWORDS):
            # Flag escalation so the downstream router hands off to a human.
            state.requires_human_intervention = True
            return "human_router"
        return "customer_service"

class SummaryNode:
    """Node that asks the LLM for a closing summary of the conversation."""

    def __call__(self, state: ConversationState) -> Dict[str, Any]:
        from langchain_core.messages import AIMessage

        # Feed the query and outcome back to the model for a short recap.
        summary_prompt = f"""
        基于以下对话生成一个简短的总结：
        用户问题: {state.user_query}
        解决状态: {state.resolution_summary or '通过AI助手解决'}
        """

        result = llm.invoke([
            {"role": "system", "content": "生成简洁的对话总结"},
            {"role": "user", "content": summary_prompt},
        ])

        closing = f"对话结束。总结: {result.content}"
        return {
            "messages": [AIMessage(content=closing)],
            "resolution_summary": result.content,
        }

def create_customer_service_graph():
    """Build and compile the customer-service conversation graph.

    Returns:
        A compiled LangGraph app with a checkpointer for per-thread
        conversation persistence.
    """
    workflow = StateGraph(ConversationState)

    # BUG FIX: a graph *node* must return a dict of state updates, not a
    # string.  The original registered ConversationRouter() (which returns a
    # node name) as the "router" node; the routing decision belongs in the
    # conditional edge below, so the node itself is now a no-op passthrough.
    workflow.add_node("router", lambda state: {})
    workflow.add_node("customer_service", CustomerServiceAgent())
    workflow.add_node("technical_support", TechnicalSupportAgent())
    workflow.add_node("human_router", HumanInterventionRouter())
    workflow.add_node("human_agent", HumanAgent())
    workflow.add_node("summary", SummaryNode())

    workflow.set_entry_point("router")

    # Route the initial query to the right specialist.
    workflow.add_conditional_edges(
        "router",
        ConversationRouter(),
        {
            "customer_service": "customer_service",
            "technical_support": "technical_support",
            "human_router": "human_router",
        },
    )

    # Both AI agents funnel into the human-intervention check.
    workflow.add_edge("customer_service", "human_router")
    workflow.add_edge("technical_support", "human_router")
    workflow.add_conditional_edges(
        "human_router",
        lambda state: "human_agent" if state.requires_human_intervention else "summary",
        {
            "human_agent": "human_agent",
            "summary": "summary",
        },
    )
    workflow.add_edge("human_agent", "summary")
    workflow.add_edge("summary", END)

    # Checkpointer persists conversation state per thread_id.
    # NOTE(review): in recent langgraph releases SqliteSaver.from_conn_string
    # returns a context manager rather than a saver instance -- confirm
    # against the installed langgraph version.
    memory = SqliteSaver.from_conn_string(":memory:")

    return workflow.compile(checkpointer=memory)

def run_conversation_example():
    """Run several canned conversations through the graph and print results."""
    app = create_customer_service_graph()

    # Independent test scenarios covering each routing branch.
    test_queries = [
        "我的账户无法登录，显示密码错误",
        "我想申请退款",
        "软件经常崩溃，怎么解决？",
        "查询订单状态"
    ]

    for i, query in enumerate(test_queries):
        print(f"\n{'='*50}")
        print(f"对话 {i+1}: {query}")
        print(f"{'='*50}")

        # BUG FIX: the original reused one thread_id for every scenario, so
        # the checkpointer carried message history from one test conversation
        # into the next.  A distinct thread per scenario keeps them isolated.
        config = {"configurable": {"thread_id": f"user_123_{i}"}}

        initial_state = ConversationState(
            user_query=query,
            current_step="start"
        )

        final_state = app.invoke(initial_state, config)

        # invoke() returns the final state as a mapping.
        print(f"最终状态: {final_state['current_step']}")
        print(f"需要人工干预: {final_state['requires_human_intervention']}")
        print(f"解决总结: {final_state.get('resolution_summary', 'N/A')}")

        # Dump the full message history for inspection.
        print("\n对话历史:")
        for msg in final_state['messages']:
            role = "用户" if msg.type == "human" else "助手"
            print(f"{role}: {msg.content}")

def advanced_usage_example():
    """Demonstrate streaming execution and incremental state inspection."""
    app = create_customer_service_graph()
    config = {"configurable": {"thread_id": "advanced_user_456"}}

    print("\n流式处理示例:")
    query = "我的应用无法启动，显示错误代码500"
    state = ConversationState(user_query=query, current_step="start")

    # stream_mode="values" yields a full state snapshot after each node runs.
    for snapshot in app.stream(state, config, stream_mode="values"):
        if 'current_step' in snapshot:
            print(f"当前步骤: {snapshot['current_step']}")
        if 'messages' in snapshot and snapshot['messages']:
            newest = snapshot['messages'][-1]
            if hasattr(newest, 'content'):
                print(f"最新消息: {newest.content}")

if __name__ == "__main__":
    # Run the basic multi-scenario example.
    run_conversation_example()
    
    # Run the streaming / state-inspection example.
    advanced_usage_example()