# This would run the previously defined lambda, adding a 'processed' field
# on top of the input dict, and finally print the processed dict.
#output = chain.invoke({"num": 5})
#print(output)

import os

from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_milvus import Milvus
from langchain_openai import ChatOpenAI

# Initialize the embedding model (DashScope text-embedding-v2).
# SECURITY FIX: the API key was previously hard-coded in source (leaked
# credential — rotate it). It is now read from the DASHSCOPE_API_KEY
# environment variable; a missing variable fails fast with a KeyError.
embeddings = DashScopeEmbeddings(
    model="text-embedding-v2",
    max_retries=3,  # retry transient API failures up to 3 times
    dashscope_api_key=os.environ["DASHSCOPE_API_KEY"],
)

# Sample documents to index; each metadata entry records the source URL
# of the chunk (page_content text is kept verbatim).
documents = [
    Document(
        page_content="Langchain支持多种数据库集成，小弟课堂的AI大课.",
        metadata={"source": "xdclass.net/doc1"},
    ),
    Document(
        page_content="Milvus擅长处理向搜索,小滴课堂的AI大课",
        metadata={"source": "xdclass.net/doc2"},
    ),
]

# Embed the sample documents and index them into a Milvus collection.
# NOTE(review): the Milvus endpoint is a hard-coded public IP — confirm
# this should not come from configuration.
milvus_connection = {"uri": "http://49.234.21.142:19530"}
vector_store = Milvus.from_documents(
    documents=documents,
    embedding=embeddings,
    connection_args=milvus_connection,
    collection_name="runnable_test1",
)

# Expose the store as a retriever; the default search type is similarity
# search, returning the top-k closest documents per query.
search_options = {"k": 3}
retriever = vector_store.as_retriever(search_kwargs=search_options)

# Prompt that stuffs the retrieved context before the user question.
# {context} and {question} are filled by the upstream runnable map.
_RAG_TEMPLATE = """
基于上下文回答：{context} \n问题:
{question}
"""
prompt = ChatPromptTemplate.from_template(_RAG_TEMPLATE)

# Initialize the chat model (qwen-plus) through DashScope's
# OpenAI-compatible endpoint.
# SECURITY FIX: the API key was previously hard-coded in source (leaked
# credential — rotate it). It is now read from the DASHSCOPE_API_KEY
# environment variable, consistent with the embeddings client above.
model = ChatOpenAI(
    model_name="qwen-plus",
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    api_key=os.environ["DASHSCOPE_API_KEY"],
    temperature=0.7,  # moderately creative sampling
)

# Assemble the RAG chain: the retriever fills {context} from Milvus while
# RunnablePassthrough forwards the raw user question untouched into
# {question}; the filled prompt is then sent to the chat model.
rag_inputs = {
    "context": retriever,
    "question": RunnablePassthrough(),  # pass the user's question through as-is
}
chain = rag_inputs | prompt | model

result = chain.invoke("LangChain支持数据库吗?")

print(result)