import asyncio
from asyncio import DefaultEventLoopPolicy
from cmd import PROMPT
from contextlib import contextmanager
from email import message
from multiprocessing.connection import Client
from multiprocessing.reduction import steal_handle
from operator import add
from typing import Type, TypedDict, Annotated

from langchain_core.messages import AnyMessage, HumanMessage
from langgraph.config import get_stream_writer
from langgraph.graph import StateGraph, END

# Valid classification labels. NOTE: "cuplet" (sic) is kept as-is because the
# node ids and routing throughout this file use the same spelling.
nodes = ["supervisor", "travel", "cuplet", "joke", "other"]
# Import the ChatTongyi chat model and related libraries
from langchain_community.chat_models.tongyi import ChatTongyi
import os

# The DashScope API key can be supplied in either of these ways:
# Option 1: environment variable
# os.environ["DASHSCOPE_API_KEY"] = "your API key"

# Option 2: pass the api_key parameter directly
llm = ChatTongyi(
    model="qwen-plus",
    # If using the environment variable, keep the line below commented out
    # api_key="your API key"
    # NOTE: a load_key() helper is not defined in this file; use one of the
    # two options above
)


class State(TypedDict):
    """Shared graph state passed between nodes."""
    # Conversation history. The ``add`` reducer makes langgraph append the
    # lists returned by nodes instead of replacing them.
    # NOTE(review): some nodes below return the key "messages" (plural) —
    # those writes are silently dropped; confirm and unify on "message".
    message: Annotated[list[AnyMessage], add]
    # Classification label produced by supervisor_node (one of ``nodes``).
    type: str

# # End-node sentinel constant — prefer importing END from langgraph.graph
# # instead of redefining it here
# END = "__end__"

# Classification system prompt: asks the LLM to return ONLY one of the
# category labels (travel / joke / cuplet / other).
prompt = "你需要根据用户的问题，判断问题的类型。请直接返回类型，不要返回其他内容。\n类型包括：travel(旅行相关), joke(笑话相关), cuplet(对联相关), other(其他类型)"

def supervisor_node(state: State):
    """Classify the user's question and route via the ``type`` field.

    Returns ``{"type": END}`` once a type is already present (a worker
    node has answered and looped back); otherwise asks the LLM to pick
    one of the labels in ``nodes``.

    Raises:
        ValueError: if the LLM returns an unrecognised label.
    """
    print(">> supervisor_node")
    writer = get_stream_writer()
    # Bug fix: the original passed a set literal {"node", "..."}; custom
    # stream events are meant to be dicts.
    writer({"node": ">>> supervisor_node"})

    # A worker node already ran — signal the graph to finish. Checked
    # first so we don't build a prompt we never use.
    if "type" in state:
        return {"type": END}

    # Build the classification prompt from the first user message.
    prompts = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": state["message"][0].content},
    ]

    response = llm.invoke(prompts)
    # Strip whitespace/newlines the model may add around the bare label.
    type_res = response.content.strip()
    writer({"supervisor_step": f"按问题分类结果：{type_res}"})
    if type_res in nodes:
        return {"type": type_res}
    raise ValueError(f"Invalid type: {type_res}")
    
def travel_node(state: State):
    """Answer travel questions with a ReAct agent backed by AMap MCP tools.

    Bug fixes vs. the original: the duplicate, mis-indented inner
    ``def travel_node`` (a syntax error) is merged into one function; the
    undefined ``prompts`` is built from state; the misspelled
    ``MultiServerMCPCClient`` is corrected; the agent result is read from
    its ``messages`` list; the return uses the State key ``message``.
    """
    print(">>> travel_node")
    writer = get_stream_writer()
    writer({"node": ">>> travel_node"})

    # Local imports: asyncio is stdlib; the others come from the agent/MCP
    # tooling this project depends on. NOTE(review): confirm
    # langchain_mcp_adapters is in the project's requirements.
    import asyncio
    from langchain_mcp_adapters.client import MultiServerMCPClient
    from langgraph.prebuilt import create_react_agent

    client = MultiServerMCPClient(
        {
            "amap-maps": {
                "command": "npx",
                "args": ["-y", "@amap/amap-maps-mcp-server"],
                "env": {
                    # NOTE(review): the API key should come from the
                    # environment, not be hard-coded in source.
                    "AMAP_MAPS_API_KEY": "451ad40d0e39453600f2a305e31eabe4"
                },
                "transport": "stdio",
            }
        }
    )

    tools = asyncio.run(client.get_tools())
    agent = create_react_agent(model=llm, tools=tools)

    # The original referenced an undefined ``prompts``; build the agent
    # input from the first user message in state.
    result = agent.invoke({"messages": [("user", state["message"][0].content)]})
    # create_react_agent returns {"messages": [...]}; the last entry is the
    # final answer — presumably an AIMessage; TODO confirm.
    answer = result["messages"][-1].content
    writer({"travel_result": answer})
    return {"message": [HumanMessage(content=answer)], "type": "travel"}


def joke_node(state: State):
    """Generate a short joke (under 100 characters) for the user's question."""
    print(">>> joke_node")
    writer = get_stream_writer()
    # Bug fix: dict event, not the original set literal.
    writer({"node": ">>> joke_node"})

    system_prompt = "你是一个笑话大师，根据用户的问题，写一个不超过100个字的笑话。"

    prompts = [
        {"role": "system", "content": system_prompt},
        # Bug fix: State declares the key "message" (singular) holding
        # message objects, so read .content rather than passing the object.
        {"role": "user", "content": state["message"][0].content},
    ]
    response = llm.invoke(prompts)
    writer({"joke_result": response.content})

    # Bug fix: return under "message" so the State reducer actually sees it.
    return {"message": [HumanMessage(content=response.content)], "type": "joke"}
def couplet_node(state: State):
    """Answer couplet (对联) requests using retrieval-augmented examples.

    Bug fixes vs. the original: the missing imports and the never-constructed
    ``vector_store`` are added; the retrieved ``samples`` are actually fed to
    the prompt template (the original template never referenced them); the
    undefined ``load_key`` fallback now fails fast; the unused ``redis_url``
    local is folded into the Redis config; the return uses the State key
    ``message``.
    """
    print(">>> couplet_node")
    writer = get_stream_writer()
    writer({"node": ">>>> couplet_node"})

    # Local imports from packages this file already depends on.
    # NOTE(review): confirm langchain_redis is in the project's requirements.
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_community.embeddings import DashScopeEmbeddings
    from langchain_redis import RedisConfig, RedisVectorStore

    prompt_template = ChatPromptTemplate.from_messages([
        # Feed the retrieved sample couplets to the model via the system
        # message; the original left the system slot empty and discarded them.
        ("system", "参考样例：{samples}"),
        ("user", "{text}"),
    ])
    query = state["message"][0].content
    if not os.environ.get("DASHSCOPE_API_KEY"):
        # The original called an undefined ``load_key``; fail fast instead.
        raise RuntimeError("DASHSCOPE_API_KEY is not set")

    embedding_model = DashScopeEmbeddings(model="text-embedding-v1")
    config = RedisConfig(
        index_name="couplet",
        redis_url="redis://localhost:6379",
    )
    # The original never constructed the store it searched.
    vector_store = RedisVectorStore(embedding_model, config=config)

    scored_results = vector_store.similarity_search_with_score(query, k=10)
    samples = [doc.page_content for doc, _score in scored_results]

    couplet_prompt = prompt_template.invoke({"samples": samples, "text": query})
    writer({"couplet_prompt": couplet_prompt})
    response = llm.invoke(couplet_prompt)
    writer({"couplet_result": response.content})
    return {"message": [HumanMessage(content=response.content)], "type": "couplet"}
def other_node(state: State):
    """Fallback node for questions outside the supported categories."""
    print(">>> other_node")
    writer = get_stream_writer()
    # Bug fix: dict event, not the original set literal.
    writer({"node": ">>> other_node"})

    return {"message": [HumanMessage(content="我暂时无法回复这个问题")], "type": "other"}



    
# Register all worker nodes on the graph.
builder = StateGraph(State)
builder.add_node("supervisor_node", supervisor_node)
builder.add_node("travel_node", travel_node)
builder.add_node("joke_node", joke_node)
# Bug fix: the function is named couplet_node; the original passed the
# undefined name ``cuplet_node`` (NameError at import time). The node id
# keeps the original "cuplet_node" spelling because routing_func and the
# edges below reference it.
builder.add_node("cuplet_node", couplet_node)
builder.add_node("other_node", other_node)

# Checkpointing import (currently unused — compile() below is called
# without a checkpointer).
from langgraph.checkpoint.memory import InMemorySaver

# Start-node sentinel; matches langgraph.graph.START ("__start__").
START = "__start__"

def routing_func(state: State):
    """Map the label in ``state["type"]`` to the next graph node id.

    ``END`` (from langgraph.graph) terminates the run; any unknown label
    falls through to ``other_node``.
    """
    routes = {
        "travel": "travel_node",
        "joke": "joke_node",
        "cuplet": "cuplet_node",
        END: END,
    }
    return routes.get(state["type"], "other_node")

builder.add_edge(START, "supervisor_node")
# Conditional routing from the supervisor to the worker nodes.
# Bug fix: END must appear in the path map — the original mapping omitted
# it, so routing_func returning END would fail instead of finishing.
builder.add_conditional_edges(
    "supervisor_node",
    routing_func,
    {
        "travel_node": "travel_node",
        "joke_node": "joke_node",
        "cuplet_node": "cuplet_node",
        "other_node": "other_node",
        END: END,
    },
)
# Every worker loops back so the supervisor can emit the END signal.
builder.add_edge("travel_node", "supervisor_node")
builder.add_edge("joke_node", "supervisor_node")
builder.add_edge("cuplet_node", "supervisor_node")
builder.add_edge("other_node", "supervisor_node")

# Compile the graph. No checkpointer is attached, so the thread_id passed
# by callers has no persistent effect.
graph = builder.compile()

# Manual smoke test for the graph.
if __name__ == "__main__":
    # Thread id for checkpointing; harmless while compile() has no checkpointer.
    run_config = {"configurable": {"thread_id": "1"}}

    # NOTE(review): "type" is pre-set here, so supervisor_node will return
    # END on its first run — confirm this is the intended test input.
    initial_state = {
        "message": [HumanMessage(content="给我讲郭德纲的笑话")],
        "type": "joke",
    }

    # stream_mode="custom" yields whatever the nodes emit via get_stream_writer.
    for event in graph.stream(initial_state, run_config, stream_mode="custom"):
        print(f"接收到的chunk: {event}")
        print(event)