# pip install fastapi uvicorn

import json
from fastapi import FastAPI
from pydantic import BaseModel, constr
from fastapi.responses import StreamingResponse
from collections.abc import (
    AsyncGenerator,
    AsyncIterator,
    Awaitable,
    Coroutine,
    Iterator,
    Mapping,
    Sequence,
)

from langgraph.graph import MessagesState
from typing import Any, Dict
from langgraph.graph import StateGraph
from langchain_ollama import ChatOllama
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode
from langgraph.graph import START, END

# Request body schema shared by the /run and /stream endpoints:
# a single free-form chat prompt from the user.
class RequestData(BaseModel):
    user_input: str

app = FastAPI()

# Chat model served by a local Ollama instance; reasoning is disabled so
# replies come back as plain text.
# NOTE(review): assumes an Ollama server is listening on 127.0.0.1:11434 — confirm.
llm = ChatOllama(model="qwen3:8b", temperature=0.5, reasoning=False, base_url="http://127.0.0.1:11434/")

######################### non-streaming version start #############################
@tool
def get_weather(city: str) -> str:
    """Get weather for a given city."""
    # NOTE: the docstring above doubles as the tool description handed to
    # the LLM, so it is kept verbatim.
    print(f"Getting weather for {city}")  # trace each tool invocation
    forecast = f"It's always raining in {city}!"
    return forecast

# The tool set exposed to the model, the graph node that executes those
# tools, and an LLM handle bound to their schemas so it can emit tool calls.
tools = [get_weather]

tool_node = ToolNode(tools)

llm_with_tools = llm.bind_tools(tools)

def call_llm(state: MessagesState) -> Dict[str, Any]:
    """Ask the tool-bound LLM for the next message.

    Returns a state delta: the model's reply, to be appended to
    ``messages`` by the graph's reducer.
    """
    reply = llm_with_tools.invoke(state["messages"])
    return {"messages": [reply]}

def should_continue(state: MessagesState):
    """Route to the 'tools' node when the latest reply requests tool calls."""
    latest = state["messages"][-1]  # last element of the message history
    return "tools" if latest.tool_calls else END

# Non-streaming graph: START -> call_llm, then loop through the tools node
# for as long as the model keeps requesting tool calls, else END.
chatbotGraph = (
    StateGraph(MessagesState)
    .add_node("call_llm", call_llm)
    .add_node("tools", tool_node)
    .add_edge(START, "call_llm")
    .add_conditional_edges("call_llm", should_continue, ["tools", END])
    .add_edge("tools", "call_llm")
    .compile(name="cbg")
)

@app.post("/run")
async def run_workflow(data: RequestData):
    """Run the non-streaming chat graph once and return its final state.

    Uses the graph's async entry point (``ainvoke``): the original sync
    ``invoke`` inside an ``async def`` handler would block FastAPI's
    event loop for the whole LLM round-trip.
    """
    result = await chatbotGraph.ainvoke(
        {"messages": [HumanMessage(content=data.user_input)]}
    )
    return result

# curl -X POST "http://127.0.0.1:8000/run" -H "Content-Type: application/json" -d '{"user_input":"什么是 LangGraph？"}'

######################### non-streaming version end #############################

###################### streaming version start #############################
def should_continue2(state: MessagesState):
    """Streaming-graph router: 'tools' on pending tool calls, else END."""
    history = state["messages"]
    print("++++++++++++++++")  # debug marker
    print(history)  # dump the running message list for debugging
    return "tools" if history[-1].tool_calls else END

def call_llm_stream(state: MessagesState):
    """Invoke the tool-bound LLM once for the streaming graph.

    Token-level streaming is handled by the endpoint via
    ``graph.astream(stream_mode="messages")``, so a plain ``invoke``
    here is sufficient — the graph runtime surfaces the chunks.
    """
    reply = llm_with_tools.invoke(state["messages"])
    return {"messages": [reply]}

# Streaming variant of the chat graph: same topology as the non-streaming
# one, but consumed through astream(stream_mode="messages") by /stream.
chatbotGraph_stream = (
    StateGraph(MessagesState)
    .add_node("call_llm", call_llm_stream)
    .add_node("tools", tool_node)
    .add_edge(START, "call_llm")
    .add_conditional_edges("call_llm", should_continue2, ["tools", END])
    .add_edge("tools", "call_llm")
    .compile(name="cbgs")
)

@app.post("/stream")
async def stream_workflow(data: RequestData):
    """Stream the chat graph's LLM output back to the client token by token.

    With ``stream_mode="messages"`` the graph yields
    ``(message_chunk, metadata)`` tuples per token. The original handler
    printed a debug marker for every chunk and forwarded ``result[0].content``
    unconditionally, which also emitted empty strings for tool-call deltas;
    here we unpack the tuple explicitly and forward only non-empty text.
    """
    async def generate():
        async for chunk, _metadata in chatbotGraph_stream.astream(
            {"messages": [HumanMessage(content=data.user_input)]},
            stream_mode="messages",
        ):
            if chunk.content:  # skip empty tool-call / bookkeeping chunks
                yield chunk.content

    return StreamingResponse(generate(), media_type="text/event-stream")

# curl -N -X POST "http://127.0.0.1:8000/stream" -H "Content-Type: application/json" -d '{"user_input":"什么是 LangGraph？"}'

###################### streaming version end #############################

# uvicorn web_main:app --reload

