import os

from langchain.prompts import PromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_milvus import Milvus
from langchain_openai import ChatOpenAI

# Name of the Milvus collection backing the document Q&A store.
COLLECTION_NAME = 'doc_qa_db'

# Fetch the source pages from the web; extra URLs can be re-enabled below.
loader = WebBaseLoader(
    [
    # "https://milvus.io/docs/overview.md",
    # "https://milvus.io/docs/release_notes.md",
    "https://milvus.io/docs/zh/quickstart.md"
    ]
)

# Split the loaded pages into overlapping chunks suitable for embedding.
# NOTE(review): all_splits is only consumed by the commented-out
# Milvus.from_documents paths further down — with the currently active
# code the chunks are computed but never inserted; confirm intentional.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=20)
all_splits = text_splitter.split_documents(loader.load())

# Embedding model (DashScope), retrying up to 3 times on transient errors.
# SECURITY(review): the API key was hardcoded here — rotate the leaked key
# and set the DASHSCOPE_API_KEY environment variable; the literal remains
# only as a backward-compatible fallback.
embeddings = DashScopeEmbeddings(
    model="text-embedding-v2",
    max_retries=3,
    dashscope_api_key=os.getenv(
        "DASHSCOPE_API_KEY", "sk-4f1498f1c0314ba79ea2919bd7a02c4d"
    ),
)

# Alternative (first approach): embed and insert the chunks on every run.
# vector_store = Milvus.from_documents(
#     documents=all_splits,
#     embedding=embeddings,
#     connection_args={"uri": "http://49.234.21.142:19530"},
#     collection_name=COLLECTION_NAME,
# )

# Active approach: attach to the existing collection instead of
# re-inserting the data on every run (the first positional argument
# is the embedding function).
vector_store = Milvus(
    embeddings,
    connection_args={"uri":"http://49.234.21.142:19530"},
    collection_name = COLLECTION_NAME,
)

# Second approach (kept for reference): recreate the collection with
# drop_old=True, then insert the split documents.
# connection_args = {"url":"http://49.234.21.142:19530"}
# vector_store=Milvus(
#     embedding_function=embeddings,
#     connection_args=connection_args,
#     collection_name=COLLECTION_NAME,
#     drop_old=True  # drop the collection first if it already exists
# ).from_documents(
#     all_splits,
#     embedding= embeddings,
#     collection_name=COLLECTION_NAME,
#     connection_args=connection_args
# )

# Sanity check: fetch the 3 chunks most similar to a sample question
# and dump them (raw Document list first, then each chunk's text).
query = "Milvus最新版本是多少"
docs = vector_store.similarity_search(query, k=3)
print(docs)
for doc in docs:
    print(doc.page_content)

# Chat model served through DashScope's OpenAI-compatible endpoint.
# SECURITY(review): hardcoded API key — rotate it and prefer the
# DASHSCOPE_API_KEY environment variable; the literal is only a fallback.
llm = ChatOpenAI(
    model_name="qwen-plus",
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    api_key=os.getenv("DASHSCOPE_API_KEY", "sk-4f1498f1c0314ba79ea2919bd7a02c4d"),
    temperature=0.7
)

# Expose the vector store as a retriever for the RAG chain.
retriever = vector_store.as_retriever()

# Prompt template: retrieved context plus the user's question are handed
# to the LLM, which produces the final summarized answer.
template = """你是AI文档助手，试用以下上下文来回答最后的问题.如果你不知道答案，就说你不知道，不要试图编造答案。
最多用10句话，并尽可能简洁的回。总是在答案的末尾说“谢谢你的提问！”
{context}
问题:{question}
"""
rag_prompt = PromptTemplate.from_template(template)

# Chain wiring: the user input is fed both to the retriever (its results
# automatically fill {context}) and, via RunnablePassthrough, straight
# through as {question}; the rendered prompt then goes to the LLM.
rag_chain = ({"context": retriever, "question": RunnablePassthrough()} | rag_prompt | llm)
result = rag_chain.invoke(query)
print(result)