import logging
import os

from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_milvus import Milvus
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Enable INFO logging for the multi-query retriever so the LLM-generated
# query variants are printed when the retriever runs.
logging.basicConfig()
logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.INFO)

# Load the raw Q&A document from disk.
doc_loader = TextLoader("data/qa.txt", encoding="utf-8")
docs = doc_loader.load()

# RAG step 1: split the document into small overlapping chunks so each
# chunk fits comfortably in an embedding request.
splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
splits = splitter.split_documents(docs)

#2初始化模型
# RAG step 2: initialise the DashScope embedding model.
# NOTE(security): API keys must not live in source control. The key is now
# read from the DASHSCOPE_API_KEY environment variable; the hard-coded value
# is kept only as a backward-compatible fallback — rotate it and delete the
# literal.
embeddings = DashScopeEmbeddings(
    model="text-embedding-v2",
    max_retries=3,
    dashscope_api_key=os.environ.get(
        "DASHSCOPE_API_KEY", "sk-4f1498f1c0314ba79ea2919bd7a02c4d"
    ),
)

# RAG step 3: build the Milvus vector store from the chunks.
# WARNING: Milvus.from_documents inserts the documents on EVERY run, so
# re-executing this script duplicates data in the collection (as the
# original comment noted) — drop or reuse the collection for repeat runs.
vector_store = Milvus.from_documents(
    documents=splits,
    embedding=embeddings,
    connection_args={"uri": "http://49.234.21.142:19530"},
    collection_name="multi_query_test",
)

# The question to answer against the indexed document.
question = "介绍一下捕梦网的内容"

# Chat model served through DashScope's OpenAI-compatible endpoint.
llm = ChatOpenAI(
    model_name="qwen-plus",
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    # NOTE(security): prefer the DASHSCOPE_API_KEY environment variable; the
    # literal fallback keeps the script runnable but should be rotated and
    # removed from source.
    api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-4f1498f1c0314ba79ea2919bd7a02c4d"),
    temperature=0.7,
)

# Build a multi-query retriever: the LLM rewrites the question into several
# phrasings and the union of their vector-search hits is returned, improving
# recall over a single embedded query.
retriever = MultiQueryRetriever.from_llm(
    retriever=vector_store.as_retriever(),
    llm=llm,
)

results = retriever.invoke(question)

print(len(results))
for result in results:
    print(f"内容: {result.page_content}元数据: {result.metadata}")

