from typing import Annotated, Sequence, TypedDict

from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import END, Graph, StateGraph

from my_tools.debate_evidence_retriever import DebateEvidenceTool


class DebateState(TypedDict):
    """Shared state flowing through the debate workflow graph."""

    stage: str  # current debate stage label, e.g. "立论"
    role: str   # speaker role, e.g. "正方一辩"
    task: str   # the claim/task the agent must argue
    # Accumulated arguments; the Annotated reducer concatenates new entries
    # onto the old ones instead of overwriting (LangGraph merge semantics).
    history: Annotated[Sequence[str], lambda a, b: a + b]
    llm_thought: str  # intermediate decision/reasoning text from the LLM
    speech: str       # final generated speech
    # Retrieved supporting evidence. The retrieval node returns this key and
    # the speech template reads {evidence_text}, but the original schema
    # omitted it (and invoke() never initialized it), so the no-evidence
    # path hit a missing template variable.
    evidence_text: str



class DebateAgent:
    """Debate agent driven by a LangGraph workflow.

    Pipeline: decide whether factual evidence is needed -> optionally
    retrieve evidence -> generate the speech for the current stage.
    """

    def __init__(self, llm, evidence_tool: DebateEvidenceTool):
        self.llm = llm                      # chat model; .invoke() returns a message with .content
        self.evidence_tool = evidence_tool  # wrapper exposing .tool.run({...})
        self._build_workflow()

    def _build_workflow(self):
        """Build and compile the LangGraph workflow.

        Uses ``StateGraph`` so each node returns a *partial* state update
        that is merged into ``DebateState``; the raw ``Graph`` API used
        previously replaces the whole state with a node's return value,
        which would have discarded every other key.
        """
        self.workflow = StateGraph(DebateState)

        # Nodes
        self.workflow.add_node("decide_evidence", self._decide_evidence)
        self.workflow.add_node("retrieve_evidence", self._retrieve_evidence)
        self.workflow.add_node("generate_speech", self._generate_speech)

        # The original code never set an entry point, and reused the decide
        # node callable as the conditional-edge router, which invoked the
        # LLM a second time. Route on the decision already recorded in state.
        self.workflow.set_entry_point("decide_evidence")
        self.workflow.add_conditional_edges(
            "decide_evidence",
            self._route_after_decision,
            {"evidence_needed": "retrieve_evidence", "no_evidence": "generate_speech"},
        )

        # Regular edges
        self.workflow.add_edge("retrieve_evidence", "generate_speech")
        self.workflow.add_edge("generate_speech", END)

        self.compiled_workflow = self.workflow.compile()

    def _decide_evidence(self, state: DebateState) -> dict:
        """Decision node: ask the LLM whether real data support is needed.

        Returns a partial state update recording the decision in
        ``llm_thought``; branch selection happens in
        ``_route_after_decision``, not here.
        """
        prompt = ChatPromptTemplate.from_template("""
            你当前处于{stage}阶段，角色是{role}，任务：{task}
            历史论点：{history}
            是否需要真实数据支持？回答"需要"或"不需要"
        """)
        chain = prompt | self.llm
        decision = chain.invoke(state).content
        return {"llm_thought": f"决策：{decision}"}

    @staticmethod
    def _route_after_decision(state: DebateState) -> str:
        """Router: map the recorded decision to the next node key.

        Checks the negative answer first — "不需要" contains the substring
        "需要", so the original ``"需要" in decision`` test could never take
        the no-evidence branch.
        """
        thought = state.get("llm_thought", "")
        if "不需要" in thought:
            return "no_evidence"
        return "evidence_needed" if "需要" in thought else "no_evidence"

    def _retrieve_evidence(self, state: DebateState) -> dict:
        """Evidence-retrieval node: run the tool against the current task.

        The evidence is appended to the existing ``llm_thought`` channel —
        the ``DebateState`` schema declares no dedicated evidence field, and
        the speech prompt renders ``llm_thought``, so the evidence still
        reaches the final generation step.
        """
        evidence = self.evidence_tool.tool.run({"claim": state["task"]})
        return {"llm_thought": state.get("llm_thought", "") + f"\n检索到的证据：{evidence}"}

    def _generate_speech(self, state: DebateState) -> dict:
        """Speech-generation node: produce the debate speech.

        The template no longer references ``{evidence_text}`` — in the
        original, that variable was never initialized, so the no-evidence
        branch failed with a missing-variable error. Evidence now arrives
        through ``{llm_thought}``.
        """
        prompt = ChatPromptTemplate.from_template("""
            {llm_thought}
            请以{role}身份生成{stage}阶段辩词，要求：
            1. 回应用户任务：{task}
            2. 回应历史论点：{history}
        """)
        chain = prompt | self.llm
        response = chain.invoke(state)
        return {"speech": response.content}

    def invoke(self, stage: str, role: str, task: str, history: list = None):
        """Run the full workflow and return the generated speech.

        Args:
            stage: Debate stage label, e.g. "立论".
            role: Speaker role, e.g. "正方一辩".
            task: The claim/task to argue.
            history: Prior arguments; defaults to an empty list.

        Returns:
            The generated speech as a plain string.
        """
        state: DebateState = {
            "stage": stage,
            "role": role,
            "task": task,
            # Copy so the graph's history reducer never aliases the caller's list.
            "history": list(history) if history else [],
            "llm_thought": "",
            "speech": "",
        }
        result = self.compiled_workflow.invoke(state)
        return result["speech"]


# Usage example.
# NOTE(review): `r1` (the chat LLM) and `tool_instance` (a DebateEvidenceTool)
# are not defined or imported anywhere in this file — running this module
# directly raises NameError. They presumably come from an interactive
# session; define or import them before using this entry point.
if __name__ == "__main__":
    agent = DebateAgent(r1, tool_instance)
    speech = agent.invoke(
        stage="立论",
        role="正方一辩",
        task="论证手机对中学生利大于弊",
        history=[]
    )
    print("\n生成的辩词：", speech)  # prints the generated speech string
