import os
from langgraph.graph import MessagesState
from typing import Any, Dict
from langgraph.graph import StateGraph
from langchain_ollama import ChatOllama
from langchain_core.messages import AIMessage, SystemMessage
from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode
from langgraph.graph import START, END
from langchain_deepseek import ChatDeepSeek
from langchain_mcp_adapters.client import MultiServerMCPClient

async def make_csghub_graph():
    """Build and compile a LangGraph agent for weather / air-quality queries.

    The agent uses DeepSeek (``deepseek-chat``) as the LLM backend and
    discovers its tools at build time from a remote MCP server over SSE.
    The graph loops LLM -> tools -> LLM until the model stops requesting
    tool calls.

    Returns:
        A compiled LangGraph state graph (name ``"cbg"``) operating on
        ``MessagesState``.
    """
    # NOTE(review): falls back to a placeholder key when the env var is
    # unset — confirm this default is intended outside of local testing.
    api_key = os.getenv("ds_api_key", "test-deepseek-key")

    llm = ChatDeepSeek(
        model="deepseek-chat",
        temperature=0.5,
        max_tokens=4096,
        timeout=None,
        max_retries=2,
        api_key=api_key,
    )

    # Single active MCP server; tools are fetched over SSE.
    client = MultiServerMCPClient(
        {
            "free-weather": {
                "url": "http://u-wanghh2003-free-weather-mcp-jn.spaces-stg.opencsg.com/sse",
                "transport": "sse",
            },
        }
    )
    # Tool discovery happens once, at graph-construction time.
    tools = await client.get_tools()

    tool_node = ToolNode(tools)
    llm_with_tools = llm.bind_tools(tools)

    # System prompt (Chinese, runtime string — kept verbatim). Built once
    # here instead of on every call_llm invocation.
    system_prompt = """
## Role:
你是一个天气和空气质量查询助手，可以通过城市名称查询天气和空气质量。你通过工具来完成用户的查询请求。

## Goals:
你可以根据用户提供的城市名称查询天气和空气质量，并返回查询结果。You can use the tools provided to assist users to complete their actions. 

## constraints:
- 只能根据用户提供的城市名称查询天气和空气质量。
- 如果用户没有提供城市名称，提示用户输入城市名称。
- 返回的内容尽量精简在50个字以内。

"""
    system_message = SystemMessage(content=system_prompt)

    def call_llm(state: MessagesState) -> Dict[str, Any]:
        """Invoke the tool-aware LLM on the system prompt plus the
        conversation so far, and append its response to the state."""
        response = llm_with_tools.invoke([system_message] + state["messages"])
        return {"messages": [response]}

    def should_continue(state: MessagesState):
        """Route to the tool node while the last message requests tool
        calls; otherwise end the graph."""
        last_message = state["messages"][-1]
        # Non-AI messages may lack ``tool_calls``; getattr keeps this safe.
        if getattr(last_message, "tool_calls", None):
            return "tools"
        return END

    csghub_agent = (
        StateGraph(MessagesState)
        .add_node(call_llm)
        .add_node("tools", tool_node)
        .add_edge(START, "call_llm")
        .add_conditional_edges("call_llm", should_continue, ["tools", END])
        .add_edge("tools", "call_llm")
        .compile(name="cbg")
    )

    return csghub_agent
