import re

from langchain_community.llms import Ollama
from langgraph.graph import StateGraph, END

from agent.agent_state import AgentState, update_history
from agent.history import compress_history
from agent.tools import query_order_system, calculate_shipping
from models.vllm_service import VLLMServer


class AgentWorkflow:
    """LangGraph-based customer-service agent.

    Pipeline: parse_input -> (retrieve_docs | call_tools | direct) ->
    generate_response -> validate_response -> END.
    """

    def __init__(self, retriever, llm_model="llama3:8b"):
        """Build and compile the state graph.

        Args:
            retriever: retrieval component exposing ``model_retrieve`` and
                ``RRF_retrieve`` (each takes a query string and ``top_n``).
            llm_model: Ollama model name for the auxiliary LLM.
        """
        # Auxiliary LLM; the main generation path goes through VLLMServer
        # in generate_response instead.
        self.llm = Ollama(model=llm_model, temperature=0.2)

        # Retrieval module
        self.retriever = retriever

        # State graph over the shared AgentState dict.
        self.workflow = StateGraph(AgentState)

        # Register nodes
        self.workflow.add_node("parse_input", self.parse_input)
        self.workflow.add_node("retrieve_docs", self.retrieve_docs)
        self.workflow.add_node("call_tools", self.call_tools)
        self.workflow.add_node("generate_response", self.generate_response)
        self.workflow.add_node("validate_response", self.validate_response)

        # Entry point
        self.workflow.set_entry_point("parse_input")

        # Branch on the intent decided by parse_input.
        self.workflow.add_conditional_edges(
            "parse_input",
            self.route_by_intent,
            {
                "retrieve": "retrieve_docs",
                "tool": "call_tools",
                "generate": "generate_response"
            }
        )

        # Linear edges: both branches converge on generation, then validation.
        self.workflow.add_edge("retrieve_docs", "generate_response")
        self.workflow.add_edge("call_tools", "generate_response")
        self.workflow.add_edge("generate_response", "validate_response")
        self.workflow.add_edge("validate_response", END)

        # Compile the state machine
        self.graph = self.workflow.compile()

    async def arun(self, state: AgentState) -> AgentState:
        """Run the compiled workflow asynchronously and return the final state."""
        return await self.graph.ainvoke(state)

    def parse_input(self, state: AgentState) -> AgentState:
        """Classify the user's intent by keyword matching.

        Sets ``state["current_step"]`` to "tool", "retrieve" or "generate".
        NOTE: branches are checked in order, so an input containing both an
        order keyword and a policy keyword routes to the tool branch.
        """
        user_input = state["user_input"]

        if "订单" in user_input or "查询" in user_input:
            state["current_step"] = "tool"
        elif "退货" in user_input or "政策" in user_input or "运费" in user_input:
            # NOTE(review): "运费" routes to retrieval here, yet call_tools
            # also dispatches on "运费" — that tool branch is only reachable
            # when the input also contains "订单"/"查询". Confirm intent.
            state["current_step"] = "retrieve"
        else:
            state["current_step"] = "generate"

        return state

    def route_by_intent(self, state: AgentState) -> str:
        """Return the branch key chosen by parse_input."""
        return state["current_step"]

    def retrieve_docs(self, state: AgentState, reorder_type='model') -> AgentState:
        """Retrieve supporting documents for the user query into the state.

        Args:
            reorder_type: 'model' uses the model-based retriever; any other
                value falls back to RRF fusion. LangGraph invokes nodes with
                the state only, so the default is always used in-graph.
        """
        query = state["user_input"]
        if reorder_type == 'model':
            state["retrieved_docs"] = self.retriever.model_retrieve(query, top_n=3)
        else:
            state["retrieved_docs"] = self.retriever.RRF_retrieve(query, top_n=3)
        return state

    def call_tools(self, state: AgentState) -> AgentState:
        """Dispatch to order lookup or shipping calculation by keyword.

        Leaves ``tools_output`` unset when no keyword matches (e.g. a bare
        "查询" request); build_prompt tolerates the missing key via .get().
        """
        user_input = state["user_input"]

        if "订单" in user_input:
            state["tools_output"] = query_order_system(user_input)
        elif "运费" in user_input or "地址" in user_input:
            state["tools_output"] = calculate_shipping(user_input)

        return state

    async def generate_response(self, state: AgentState) -> AgentState:
        """Generate the reply through the shared vLLM service.

        On any failure the user receives a generic apology instead of the
        workflow crashing.
        """
        # Build the prompt from history, retrieved docs and tool output.
        prompt = self.build_prompt(state)

        try:
            # Shared singleton vLLM instance
            vllm_server = VLLMServer.get_instance()

            # Asynchronous generation
            response = await vllm_server.generate(
                prompt,
                max_tokens=512,
                temperature=0.2
            )

            state["response"] = response
        except Exception as e:
            # Deliberate best-effort fallback: report and degrade gracefully.
            print(f"生成响应失败: {e}")
            state["response"] = "抱歉，处理您的请求时出现问题。"

        return state

    def validate_response(self, state: AgentState) -> AgentState:
        """Sanitize the generated response and append the turn to history."""
        response = self._sanitize_response(state["response"])

        # Persist the sanitized response and record both sides of the turn.
        state["response"] = response
        update_history(state, state["user_input"], is_user=True)
        update_history(state, response, is_user=False)

        return state

    @staticmethod
    def _sanitize_response(response: str) -> str:
        """Mask invalid phone numbers and flag malformed order numbers.

        Returns the sanitized response text (pure function, no state access).
        """
        # BUG FIX: the original code validated the ENTIRE response string
        # against the phone pattern (re.match(r'^1[3-9]\d{9}$', response)),
        # which almost always fails, so every 11-digit run — valid phone
        # numbers included — was masked. Validate each match individually.
        def _mask_if_invalid(m):
            num = m.group(0)
            return num if re.fullmatch(r'1[3-9]\d{9}', num) else "[手机号无效]"

        response = re.sub(r'\d{11}', _mask_if_invalid, response)

        # Order numbers are expected to be 2 letters + 9 digits (11 chars).
        # BUG FIX: the original pattern [A-Z]{2}\d{9} could only ever match
        # exactly 11 characters, so the length check was dead code and the
        # format warning unreachable. Accept a wider digit run so malformed
        # order numbers are actually flagged.
        order_match = re.search(r'订单[号]?[:：]?\s*([A-Z]{2}\d{6,12})', response)
        if order_match and len(order_match.group(1)) != 11:
            response += "\n(请注意订单号格式)"

        return response

    def build_prompt(self, state: AgentState) -> str:
        """Assemble the generation prompt from history, docs and tool output.

        NOTE(review): prompt templates are re-read from disk on every call
        through a CWD-relative path ('../prompt/...') — fragile; consider
        caching and an absolute/configurable path.
        """
        # Compress the chat history to keep the prompt within budget.
        compressed_history = compress_history(state["chat_history"])
        with open('../prompt/thought_instructions.txt', 'r', encoding='utf-8') as file:
            thought_instructions = file.read()
        with open('../prompt/constraints.txt', 'r', encoding='utf-8') as file:
            constraints = file.read()

        # Core prompt sections
        prompt_parts = [
            "你是一个专业的电商客服助手，基于给定信息回答问题:",
            "",
            f"### 约束条件:{constraints}",
            "",
            f"### 思考过程:{thought_instructions}",
            "",
            "### 历史对话:",
            *[f"- {msg.content}" for msg in compressed_history],
            "",
            "### 用户问题:",
            state["user_input"]
        ]

        # Optional: retrieved knowledge (top 3)
        if state.get("retrieved_docs"):
            prompt_parts.extend([
                "",
                "### 相关知识:",
                *[f"- {doc}" for doc in state["retrieved_docs"][:3]]
            ])

        # Optional: tool output
        if state.get("tools_output"):
            prompt_parts.extend([
                "",
                "### 工具输出:",
                state["tools_output"]
            ])

        prompt_parts.extend([
            "",
            "### 客服响应:"
        ])

        return "\n".join(prompt_parts)
