import os
import uuid
import operator
from typing import List, Annotated, Sequence, TypedDict, Literal
from langchain_community.chat_models import ChatTongyi
from langchain_core.messages import SystemMessage, AIMessage, HumanMessage, ToolMessage, BaseMessage
from pydantic import BaseModel
from langgraph.graph import StateGraph, END, START
from langgraph.checkpoint.memory import MemorySaver

# 系统提示模板
template = """Your job is to get information from a user about what type of prompt template they want to create.
You should get the following information from them:
- What the objective of the prompt is
- What variables will be passed into the prompt template
- Any constraints for what the output should NOT do
- Any requirements that the output MUST adhere to
If you are not able to discern this info, ask them to clarify! Do not attempt to wildly guess.
After you are able to discern all the information, call the relevant tool."""

class PromptInstructions(BaseModel):
    """Instructions on how to prompt the LLM."""

    # This model is bound as a tool via `bind_tools`; the class docstring above
    # becomes the tool's description in the schema sent to the model. Without it,
    # the tool has no description and tool choice degrades (some providers reject
    # description-less tools outright).
    objective: str  # what the generated prompt template should accomplish
    variables: List[str]  # placeholder variables the template will accept
    constraints: List[str]  # things the output must NOT do
    requirements: List[str]  # things the output MUST adhere to

class AgentState(TypedDict):
    # Conversation history; `operator.add` tells LangGraph to append each node's
    # returned messages to the existing list instead of overwriting it.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Name of the node that produced the most recent state update.
    sender: str

llm = ChatTongyi(model="qwen-max", api_key=os.getenv("aliyun_API_KEY"), temperature=0)
llm_with_tool = llm.bind_tools([PromptInstructions])

def get_messages_info(messages):
    """Prepend the info-gathering system prompt to the running conversation."""
    system = SystemMessage(content=template)
    return [system, *messages]

def chain(state: AgentState):
    """Info-gathering node: ask the tool-enabled LLM for its next turn."""
    reply = llm_with_tool.invoke(get_messages_info(state["messages"]))
    return {"messages": [reply], "sender": "info"}

prompt_system = """Based on the following requirements, write a good prompt template:\n\n{reqs}"""

def get_prompt_messages(state: AgentState):
    """Build the message list for the prompt-writing phase.

    Scans the history for the (last) AIMessage carrying a tool call and takes
    its arguments as the requirements. Everything before that call is dropped,
    ToolMessages are skipped, and any messages after it (user feedback) are
    kept and appended after the system prompt.
    """
    requirements = None
    feedback = []
    for msg in state["messages"]:
        if isinstance(msg, AIMessage) and msg.tool_calls:
            requirements = msg.tool_calls[0]["args"]
        elif isinstance(msg, ToolMessage):
            continue
        elif requirements is not None:
            feedback.append(msg)
    system = SystemMessage(content=prompt_system.format(reqs=requirements))
    return {"messages": [system] + feedback, "sender": "prompt"}

# Alias used when registering the "prompt" node below.
prompt_gen_chain = get_prompt_messages

def get_state(state: AgentState) -> Literal["add_tool_message", "info", "__end__"]:
    """Route after the info node based on the most recent message.

    - AIMessage with a pending tool call -> acknowledge it ("add_tool_message")
    - HumanMessage -> keep gathering info ("info")
    - anything else -> stop and wait for the next user turn (END)
    """
    last = state["messages"][-1]
    if isinstance(last, AIMessage) and last.tool_calls:
        return "add_tool_message"
    if isinstance(last, HumanMessage):
        return "info"
    return END

# In-memory checkpointer so conversation state persists across REPL turns
# (keyed by thread_id in `config` below).
memory = MemorySaver()
workflow = StateGraph(AgentState)
workflow.add_node("info", chain)
workflow.add_node("prompt", prompt_gen_chain)

# NOTE: used as a decorator, StateGraph.add_node registers this function as the
# "add_tool_message" node and rebinds the name to the builder's return value
# (presumably the StateGraph itself — confirm against the langgraph version in
# use); the rebound name is never used afterwards.
@workflow.add_node
def add_tool_message(state: AgentState):
    """Acknowledge the pending tool call so the message history stays well-formed."""
    # Routing via get_state guarantees the last message is an AIMessage with a
    # tool call when this node runs.
    last_msg = state["messages"][-1]
    return {
        "messages": [ToolMessage(content="Prompt generated!", tool_call_id=last_msg.tool_calls[0]["id"])],
        "sender": "add_tool_message"
    }

workflow.add_conditional_edges("info", get_state)
workflow.add_edge("add_tool_message", "prompt")
workflow.add_edge("prompt", END)
workflow.add_edge(START, "info")

graph = workflow.compile(checkpointer=memory)
graph_png = graph.get_graph(xray=1).draw_mermaid_png()
with open("sub_graph_combine.png", "wb") as f:
    f.write(graph_png)

config = {"configurable": {"thread_id": str(uuid.uuid4())}}

# Interactive REPL: feed each user turn into the graph until the user quits.
while True:
    user = input("User (q/Q to quit): ")
    # .strip() so stray whitespace (e.g. "q ") still quits instead of being
    # sent to the model as a conversation turn.
    if user.strip() in {"q", "Q"}:
        print("AI: Byebye")
        break
    output = None
    for output in graph.stream(
        {"messages": [HumanMessage(content=user)], "sender": "user"},
        config=config,
        stream_mode="updates",
    ):
        # Each streamed update maps a single node name to its state delta.
        node_update = next(iter(output.values()))
        print(node_update)
    # The "prompt" node is terminal, so seeing it last means generation finished.
    if output and "prompt" in output:
        print("Done!")