import datetime  # originally noted as "for handling time warnings"
import os

import dotenv
import weaviate
from langchain.retrievers import MultiQueryRetriever
from langchain_community.embeddings.baidu_qianfan_endpoint import QianfanEmbeddingsEndpoint
from langchain_openai import ChatOpenAI
from langchain_weaviate import WeaviateVectorStore
from weaviate.auth import AuthApiKey

# Load environment variables (API keys, endpoints) from a local .env file.
dotenv.load_dotenv()

# 1. Build the vector database connection and retriever.
# NOTE(security): credentials were hard-coded here. Prefer WEAVIATE_CLUSTER_URL /
# WEAVIATE_API_KEY from the environment; the literals remain only as a fallback
# so existing behavior is unchanged when the env vars are unset.
client = weaviate.connect_to_weaviate_cloud(
    cluster_url=os.getenv(
        "WEAVIATE_CLUSTER_URL",
        "https://zabwh0mbt4errmvpknamq.c0.asia-southeast1.gcp.weaviate.cloud",
    ),
    auth_credentials=AuthApiKey(
        os.getenv(
            "WEAVIATE_API_KEY",
            "b2o4OGQxcmptMTZEWmJ5VV9udE5xSXBzQW04dUlDZ0JSS0d1ay9FQlhXdEtyMDR4OUFVNzc0eG9mU3dnPV92MjAw",
        )
    ),
)

try:
    # Vector store backed by the "myleane" index; documents are embedded with
    # Baidu Qianfan embeddings and their text lives under the "text" property.
    db = WeaviateVectorStore(
        client=client,
        index_name="myleane",
        text_key="text",
        embedding=QianfanEmbeddingsEndpoint(),
    )

    # MMR (maximal marginal relevance) trades off relevance against diversity.
    retriever = db.as_retriever(search_type="mmr")

    # 2. Create the multi-query retriever: the LLM rewrites the user's question
    # into several variants, runs each against the base retriever, and merges
    # the results; include_original=True keeps the original query's hits too.
    multi_query_retriever = MultiQueryRetriever.from_llm(
        retriever=retriever,
        llm=ChatOpenAI(model_name="kimi-k2-0711-preview", temperature=0),
        include_original=True,
    )

    # 3. Run the retrieval.
    docs = multi_query_retriever.invoke("关于LLMOps应用配置的文档有哪些")
finally:
    # Always release the Weaviate connection, even if retrieval raises —
    # the original closed it only on the happy path.
    client.close()

print(docs)
print(len(docs))