import operator
from typing import Annotated, Sequence, TypedDict
from langchain_core.messages import BaseMessage

class AgentState(TypedDict):
    """Shared LangGraph state: the running conversation message list."""
    # Annotated with operator.add so each node's returned messages are
    # appended to (not replacing) the accumulated list.
    messages: Annotated[Sequence[BaseMessage], operator.add]

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma

from langchain_community.embeddings import OllamaEmbeddings
from langchain.tools.retriever import create_retriever_tool
from langgraph.prebuilt import ToolExecutor

## Knowledge content to ingest into the store
urls = [
    "https://www.gov.cn/gongbao/content/2019/content_5468932.htm",
]
# WebBaseLoader.load() returns a list of Documents per URL; flatten into one list.
docs = [WebBaseLoader(url).load() for url in urls]
docs_list = [item for sublist in docs for item in sublist]

# Split documents into ~500-token chunks with 50-token overlap.
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=500, chunk_overlap=50
)
doc_splits = text_splitter.split_documents(docs_list)

## Add the document chunks to the vector store
vectorstore = Chroma.from_documents(
    documents=doc_splits,
    collection_name="rag-chroma",
    embedding=OllamaEmbeddings(model="nomic-embed-text"),
)

## Create the retriever object
retriever = vectorstore.as_retriever()

## Create the retrieval tool (name + Chinese description shown to the LLM)
tool = create_retriever_tool(
    retriever,
    "retrieve_transportation_rules",
    "搜索并返回中国道路交通安全管理条例相关的问题",
)

tools = [tool]
tool_executor = ToolExecutor(tools)


import json
import operator
from typing import Annotated, Sequence, TypedDict
from langchain import hub
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain.tools.render import format_tool_to_openai_function
from langchain_core.utils.function_calling import convert_to_openai_tool
# from langchain_community.chat_models.ollama import ChatOllama
from langchain_core.messages import BaseMessage, FunctionMessage
from langchain.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.pydantic_v1 import BaseModel, Field
# from langchain_openai import ChatOpenAI
from langchain.chat_models import ChatOllama
from langgraph.prebuilt import ToolInvocation
from langchain_core.output_parsers import StrOutputParser
from langchain_experimental.llms.ollama_functions import convert_to_ollama_tool
from langchain_core.prompts import ChatPromptTemplate


def intention_agent(state):
    """Run the intent-detection model over the conversation so far.

    Args:
        state (dict): Current graph state; ``state["messages"]`` is the
            accumulated conversation.

    Returns:
        dict: ``{"messages": [reply]}`` — the model's reply, appended to the
        state's message list by the graph (via ``operator.add``).
    """
    print("---CALL intention detecting---")
    history = state["messages"]
    # Expose the retriever tool so the model can emit a function call.
    tool_schemas = [convert_to_ollama_tool(t) for t in tools]
    llm = ChatOllama(model="llama3.1", temperature=0, streaming=True).bind_tools(
        tool_schemas
    )
    reply = llm.invoke(history)
    # Returned as a single-element list so it is appended to existing messages.
    return {"messages": [reply]}


def retrieve(state):
    """Execute the tool call requested by the previous node.

    Args:
        state (dict): Current graph state; the last message must carry a
            ``function_call`` payload in its ``additional_kwargs``.

    Returns:
        dict: ``{"messages": [FunctionMessage]}`` holding the tool output.
    """
    print("---EXECUTE RETRIEVAL---")
    # The newest message comes from the upstream node and contains the
    # function-call name and JSON-encoded arguments.
    call_msg = state["messages"][-1]
    function_call = call_msg.additional_kwargs["function_call"]
    # Build a ToolInvocation from the requested tool name and its arguments.
    invocation = ToolInvocation(
        tool=function_call["name"],
        tool_input=json.loads(function_call["arguments"]),
    )
    # Run the tool and wrap its result as a FunctionMessage for the state.
    result = tool_executor.invoke(invocation)
    return {"messages": [FunctionMessage(content=str(result), name=invocation.tool)]}


# QA response-generation node
def generate(state):
    """Generate an answer grounded in the retrieved documents.

    Args:
        state (dict): Current graph state. ``messages[0]`` is the original
            user question; ``messages[-1]`` is the FunctionMessage whose
            content is the retrieved context.

    Returns:
        dict: ``{"messages": [answer]}`` with the generated answer string.
    """
    print("---GENERATE---")
    messages = state["messages"]
    # Fix: the original assigned `question` twice; read each value once.
    question = messages[0].content
    docs = messages[-1].content

    # Standard RAG prompt from the LangChain hub.
    prompt = hub.pull("rlm/rag-prompt")
    model = ChatOllama(model="llama3.1", temperature=0, streaming=True)
    # Chain: prompt -> model -> plain-string output
    rag_chain = prompt | model | StrOutputParser()
    response = rag_chain.invoke({"context": docs, "question": question})
    return {"messages": [response]}


# Plain-chat response-generation node (no retrieval)
def chat_generate(state):
    """Answer the user's question directly with the chat model.

    Args:
        state (dict): Current graph state. ``messages[0]`` is the original
            user question.

    Returns:
        dict: ``{"messages": [answer]}`` with the generated answer string.
    """
    print("---CHAT---")
    # Fix: the original assigned `question` twice and bound an unused
    # `last_message`; only the first message's content is needed here.
    question = state["messages"][0].content
    # Prompt
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are helpful assistant."),
        ("user", "{question}")
    ])
    model = ChatOllama(model="llama3.1")
    # Chain: prompt -> model -> plain-string output
    chat_chain = prompt | model | StrOutputParser()
    response = chat_chain.invoke({"question": question})
    return {"messages": [response]}


# Route between knowledge QA and plain chat
def should_qa(state):
    """Return "qa" when the last message requests a tool call, else "chat".

    Args:
        state (dict): Current graph state; the routing decision is based on
            the newest message's ``additional_kwargs``.

    Returns:
        str: ``"qa"`` or ``"chat"`` — keys of the conditional-edge mapping.
    """
    print("---INTENTION DETECTION---")
    newest = state["messages"][-1]
    # A function_call entry means the intent node wants the retriever tool.
    if "function_call" in newest.additional_kwargs:
        print("---DECISION: qa---")
        return "qa"
    print("---DECISION: chat---")
    return "chat"


from langgraph.graph import END, StateGraph
# Define the graph object
workflow = StateGraph(AgentState)

# Add the nodes to the graph
workflow.add_node("intention_agent", intention_agent)  # intention detection
workflow.add_node("retrieve", retrieve)  # retrieval
workflow.add_node("generate", generate)  # RAG answer generation
workflow.add_node("chat_generate", chat_generate)  # plain-chat generation



# Set the entry point
workflow.set_entry_point("intention_agent")


# Conditional edges: should_qa's return value selects the next node
workflow.add_conditional_edges(
    "intention_agent", ## upstream node
    should_qa, ## routing function
    {
        "qa": "retrieve", ## map each routing result to its downstream node
        "chat": "chat_generate",
    },
)


# retrieve -> generate edge
workflow.add_edge("retrieve", "generate")
# generate -> END edge
workflow.add_edge("generate", END)
# chat_generate -> END edge
workflow.add_edge("chat_generate", END)


# Compile
app = workflow.compile()


from langchain_core.messages import HumanMessage
# Sample question about annual-inspection rules for small/micro
# non-commercial passenger cars.
# Fix: the original literal used Kangxi-radical code points (U+2F2A "⼩",
# U+2FA7 "⾮") instead of the ideographs 小/非 — a copy-paste encoding
# artifact that tokenizes and embeds differently from real Chinese text.
inputs = {
    "messages": [
        HumanMessage(
            content="小型微型非营运载客汽车的年检规则是？"
        )
    ]
}
# Stream the graph and print only the output of the answer-producing nodes.
for output in app.stream(inputs):
    for key, value in output.items():
        if key in ['generate', 'chat_generate']:
            print(f"Output from node '{key}':")
            print(value['messages'][0])