from langchain_core.documents import Document
from langchain_experimental.graph_transformers import LLMGraphTransformer
from langchain_openai import ChatOpenAI,OpenAIEmbeddings
from langchain_neo4j import Neo4jGraph,GraphCypherQAChain
from langchain_core.messages import HumanMessage
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_milvus import Milvus
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

from langgraph.graph import StateGraph,START,MessagesState,END

from typing_extensions import Literal
from typing import TypedDict
from PIL import Image as PILImage

import io

# Open the company-profile text file in read mode ('r').
with open(r"D:\dev\python\ai\LangGraph\doc\company.txt","r",encoding="utf8") as file:
    # Read the entire file contents into memory.
    content = file.read()
    # print(content)

# Wrap the raw text in a single LangChain Document; this list feeds both the
# graph-extraction path and the vector-store ingestion path below.
documents = [Document(page_content=content)]
# print(documents)

# Create the Neo4j (Aura) connection for the graph knowledge base.
# SECURITY NOTE(review): database and API credentials are hard-coded in
# source; they should come from environment variables or a secrets manager.
graph = Neo4jGraph(url='neo4j+s://6baf1fbb.databases.neo4j.io',  # replace with your own
                  username="neo4j",  # replace with your own
                  password="mTVxAfT7omeUH5dm7t1pXHesjvNVi7bqprou9h-E9fY", # replace with your own
                  database="neo4j" # replace with your own
                 )

# OpenAI-compatible API endpoint and key (hard-coded; same security note applies).
api_key = "sk-6S0PtpNia71gjcfwSsDPsJ9mGqsVPr2XRQzAx1dHbJS7RW4t"
api_base="https://chatapi.littlewheat.com/v1"

# Shared chat model used for graph extraction, QA, and routing;
# temperature=0 keeps outputs deterministic.
llm = ChatOpenAI(model="gpt-4o",api_key=api_key,base_url=api_base,temperature=0)

# LLM-based transformer that extracts graph documents (nodes + relationships)
# from raw text, restricted to the whitelisted labels below.
graph_transformer = LLMGraphTransformer(
    llm=llm,
    allowed_nodes=["公司", "产品", "技术", "市场", "活动", "合作伙伴"],    # customizable node labels (company, product, technology, market, event, partner)
    allowed_relationships=["推出", "参与", "合作", "位于", "开发"],       # customizable relationships (launched, participated, cooperated, located-in, developed)
)

# One-time ingestion of the extracted graph into Neo4j — run once, then keep
# commented out to avoid duplicate writes:
# graph_transformer = LLMGraphTransformer(llm=llm)
# graph_documents = graph_transformer.convert_to_graph_documents(documents)
# graph.add_graph_documents(graph_documents)
#
# print(f"Graph documents: {len(graph_documents)}")
# print(f"Nodes from 1st graph doc:{graph_documents[0].nodes}")
# print(f"Relationships from 1st graph doc:{graph_documents[0].relationships}")


# QA chain that translates a natural-language question into Cypher, executes
# it against Neo4j, and summarizes the result with the LLM.
# allow_dangerous_requests=True means the generated Cypher runs unreviewed
# against the database — acceptable only in a trusted demo setup.
cypher_chain = GraphCypherQAChain.from_llm(
    graph=graph,
    cypher_llm=llm,
    qa_llm=llm,
    validate_cypher=True,
    verbose=True,
    allow_dangerous_requests=True
)
# Sample invocations, kept for reference:
# response = cypher_chain.invoke("苹果公司开发了什么")
# response = cypher_chain.invoke("都有哪些公司在我的数据库中？")
# print(response)

class AgentState(MessagesState):
    """Shared graph state: inherited `messages` plus the supervisor's routing choice."""
    # Name of the next node to run, written by the supervisor node
    # ("chat", "coder", "sqler", "graph_kg", "vec_kg", or END).
    next:str

def graph_kg(state: AgentState):
    """Answer the latest user message from the Neo4j graph knowledge base.

    Runs the GraphCypherQAChain on the most recent message's text and wraps
    the answer in a named HumanMessage so downstream nodes can attribute it.
    """
    latest = state["messages"][-1]
    answer = cypher_chain.invoke(latest.content)["result"]
    # The name tag identifies which worker produced this message.
    return {"messages": [HumanMessage(content=answer, name="graph_kg")]}

# Chunking parameters for the vector-store ingestion path.
chunk_size = 250
chunk_overlap = 30

# Recursively split the source document into overlapping chunks.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=chunk_size,
    chunk_overlap=chunk_overlap
)
splits = text_splitter.split_documents(documents)
# print(splits)

# Embedding model used to vectorize the chunks (same OpenAI-compatible endpoint).
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-large",api_key=api_key,base_url=api_base
)

# Index the chunks into a Milvus (Zilliz Cloud serverless) collection.
# SECURITY NOTE(review): connection credentials are hard-coded; move them to
# environment variables or a secrets manager.
vectorstore = Milvus.from_documents(
    documents=splits,
    collection_name="company_milvus",
    embedding=embeddings,
    connection_args={
        "uri": "https://in03-bd05f9a8e26068c.serverless.aws-eu-central-1.cloud.zilliz.com",
        "user": "db_bd05f9a8e26068c",
        "password": "Th2-sYLV?]T{v9M7",
    }
)

# Prompt for the answer-generation step of the traditional RAG pipeline.
prompt = PromptTemplate(
    template="""You are an assistant for question-answering tasks. 
    Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. 
    Use three sentences maximum and keep the answer concise:
    Question: {question} 
    Context: {context} 
    Answer: 
    """,
    input_variables=["question", "context"],
)

# Classic RAG chain. Contract: input is a dict with keys "question" and
# "context"; output is a plain string (StrOutputParser strips the message wrapper).
rag_chain = prompt | llm | StrOutputParser()

# Smoke-test the retrieval pipeline with a sample question.
question = "我的知识库中都有哪些公司信息"

# Build a retriever over the Milvus index, returning the single best chunk.
retriever = vectorstore.as_retriever(search_kwargs={"k": 1})

# Execute the retrieval.
# BUG FIX: the original passed the literal string "question" as the query
# instead of the `question` variable, so it searched for the word "question"
# rather than the sample question defined above.
docs = retriever.invoke(question)

# print(docs)

# generation = rag_chain.invoke({"context": docs, "question": question})
# print(generation)

def vec_kg(state: AgentState):
    """Answer the latest user message via the traditional (vector) RAG pipeline.

    Retrieves relevant chunks from the Milvus index for the message, feeds
    them with the question into rag_chain, and returns the answer as a named
    HumanMessage.
    """
    messages = state["messages"][-1]
    # Retrieve context for THIS query (the module-level `docs` was retrieved
    # for a fixed sample question and would be stale here).
    context_docs = retriever.invoke(messages.content)
    # BUG FIX: rag_chain = prompt | llm | StrOutputParser() expects a dict
    # with the prompt's input variables ("question", "context") — the original
    # passed a bare string. It also returns a plain string, so the original
    # `response["result"]` indexing raised a TypeError.
    answer = rag_chain.invoke({"context": context_docs, "question": messages.content})
    final_response = [HumanMessage(content=answer, name="vec_kg")]  # name tags the producing worker
    return {"messages": final_response}

def chat(state: AgentState):
    """General-conversation worker: forwards the latest message to the LLM."""
    latest = state["messages"][-1]
    reply = llm.invoke(latest.content)
    # Tag the reply with the worker's name for the supervisor loop.
    return {"messages": [HumanMessage(content=reply.content, name="chat")]}

def coder(state: AgentState):
    """Coding worker: answers the latest message with the LLM, tagged as 'coder'."""
    latest = state["messages"][-1]
    reply = llm.invoke(latest.content)
    return {"messages": [HumanMessage(content=reply.content, name="coder")]}

def sqler(state: AgentState):
    """SQL worker: answers the latest message with the LLM, tagged as 'sqler'."""
    latest = state["messages"][-1]
    reply = llm.invoke(latest.content)
    return {"messages": [HumanMessage(content=reply.content, name="sqler")]}

# Worker nodes the supervisor may route to.
members = ["chat", "coder", "sqler","graph_kg", "vec_kg"]
# Routing choices offered to the LLM: any worker, or FINISH to stop.
options = members + ["FINISH"]

class Router(TypedDict):
    """Worker to route to next. If no workers needed, route to FINISH"""
    # Literal[*options] (unpack in subscript, Python 3.11+) constrains the
    # structured output to exactly one of the option strings above.
    next:Literal[*options]


def supervisor(state: AgentState):
    """Routing node: ask the LLM which worker should act next.

    Prepends a system prompt describing every worker, asks the LLM for a
    structured Router decision, and returns {"next": <member name>} —
    or {"next": END} when the LLM answers FINISH.
    """
    # CONSISTENCY FIX: the original prompt described only chat/graph_kg/vec_kg
    # although `members` also contains coder and sqler; without descriptions
    # the router cannot route to those workers informedly.
    system_prompt = (
        "You are a supervisor tasked with managing a conversation between the"
        f" following workers: {members}.\n\n"
        "Each worker has a specific role:\n"
        "- chat: Responds directly to user inputs using natural language.\n"
        "- coder: Writes and explains program code for programming-related requests.\n"
        "- sqler: Writes and explains SQL statements for database-related requests.\n"
        "- graph_kg: Stores market and company information, built on a graph-based knowledge base, excels at answering broad and comprehensive questions.\n"
        "- vec_kg: Stores market and company information, constructed on a traditional semantic retrieval knowledge base, excels at answering detailed and fine-grained questions.\n"
        "Given the following user request, respond with the worker to act next."
        " Each worker will perform a task and respond with their results and status."
        " When finished, respond with FINISH."
    )

    messages = [{"role": "system", "content": system_prompt}, ] + state["messages"]

    # Structured output guarantees `next` is one of the Router's Literal options.
    response = llm.with_structured_output(Router).invoke(messages)

    next_ = response["next"]

    if next_ == "FINISH":
        # Map the FINISH sentinel onto LangGraph's terminal node.
        next_ = END

    return {"next": next_}

# Assemble the LangGraph workflow: one supervisor plus five worker nodes.
builder = StateGraph(AgentState)
builder.add_node("supervisor", supervisor)
builder.add_node("chat", chat)
builder.add_node("coder", coder)
builder.add_node("sqler", sqler)
builder.add_node("graph_kg", graph_kg)
builder.add_node("vec_kg", vec_kg)

for member in members:
    # Every worker always "reports back" to the supervisor after its turn.
    builder.add_edge(member, "supervisor")

# The supervisor's "next" state field selects which node (or END) runs next.
builder.add_conditional_edges("supervisor", lambda state: state["next"])

# Entry point: execution always starts at the supervisor.
builder.add_edge(START, "supervisor")

# Compile the graph.
# NOTE(review): this rebinds the module-level name `graph`, shadowing the
# Neo4jGraph instance created earlier. cypher_chain already holds its own
# reference so behavior is unaffected, but the name reuse is confusing.
graph = builder.compile()

def display(image_data):
    """Open raw image bytes (e.g. the mermaid-rendered graph PNG) in the OS viewer."""
    buffer = io.BytesIO(image_data)
    picture = PILImage.open(buffer)
    picture.show()

# display(graph.get_graph().draw_mermaid_png())

# Drive the multi-agent graph with a sample question; pretty-print the latest
# message of each streamed state snapshot.
for chunk in graph.stream({"messages": "都有哪些公司在我的数据库中。"}, stream_mode="values"):
    chunk["messages"][-1].pretty_print()