from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.faiss import FAISS
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAIEmbeddings, ChatOpenAI

from langchain_community.embeddings import OllamaEmbeddings

# Embedding backend: a locally served Ollama model (Chinese BGE-large,
# 4-bit quantized). base_url targets Ollama's default port — assumes an
# Ollama daemon is running locally with this model pulled; verify.
ollama_embedding = OllamaEmbeddings(
    model='quentinz/bge-large-zh-v1.5:q4_0',
    base_url='http://localhost:11434',
)

from llm import llm

# Chat-style RAG prompt used by `chain` below. Expects two template
# variables: {context} (retrieved documents) and {input} (user question).
# Template text is Chinese: "Based on the context: ... Answer: ...".
prompt = ChatPromptTemplate.from_template('基于上下文：{context}\n回答：{input}')
# Plain-string RAG prompt used by `chain_test` below. Expects {context}
# and {question}. Instructs the model to stay concise (max three
# sentences), admit ignorance rather than fabricate, and append a fixed
# sign-off phrase.
template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Use three sentences maximum and keep the answer as concise as possible.
Always say "thanks for asking!" at the end of the answer.

{context}

Question: {question}

Helpful Answer:"""
custom_rag_prompt = PromptTemplate.from_template(template)

output_parser = StrOutputParser()

# Seed corpus: a single in-memory document (Chinese: "TuGraph is a graph
# database product open-sourced by Ant Group"). The splitter is a no-op
# on text this short but kept to mirror a real ingestion pipeline.
docs = [Document(page_content="TuGraph是蚂蚁开源的图数据库产品")]
splits = RecursiveCharacterTextSplitter().split_documents(docs)
vectorstore = FAISS.from_documents(splits, ollama_embedding)
# BUG FIX: `as_retriever` accepts `search_kwargs`, not `search_args`.
# The misspelled keyword was silently swallowed, so the retriever ignored
# k=5 and used its default top-k instead.
retriever = vectorstore.as_retriever(search_kwargs={'k': 5})

# Baseline chain: the raw question goes straight to the LLM, no retrieval.
chain_no_context = RunnablePassthrough() | llm | output_parser

# RAG chain: the input dict is coerced into a parallel runnable that
# feeds retrieved documents into {context} and passes the question
# through to {input} before prompting the LLM.
chain = (
    {"input": RunnablePassthrough(), "context": retriever}
    | prompt
    | llm
    | output_parser
)

def format_docs(docs):
    """Concatenate each document's page_content, separated by blank lines."""
    contents = [doc.page_content for doc in docs]
    return "\n\n".join(contents)

# Alternative RAG chain using the string prompt: retrieved documents are
# flattened to plain text by format_docs before filling {context};
# {question} receives the user input unchanged.
chain_test = (
    {"question": RunnablePassthrough(), "context": retriever | format_docs}
    | custom_rag_prompt
    | llm
    | StrOutputParser()
)
# Demo: ask the same question (Chinese: "Has Ant's graph database been
# open-sourced?") first without retrieval, then through the RAG chain.
question = '蚂蚁图数据库开源了吗？'
print(chain_no_context.invoke(question))
print(chain.invoke(question))
