import os

import dotenv
import weaviate
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_core.runnables import ConfigurableField
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_weaviate import WeaviateVectorStore
from weaviate.auth import AuthApiKey
### Full RAG pipeline: split ---> store ---> retrieve
# 1. Load configuration from .env and connect to the Weaviate cloud cluster.
dotenv.load_dotenv()
# SECURITY FIX: the cluster API key was previously hardcoded in source.
# Credentials must come from the environment (WEAVIATE_API_KEY); raising
# KeyError early is preferable to shipping a live secret in the repo.
# The cluster URL is not a secret, so the original value remains the default.
client = weaviate.connect_to_wcs(
    cluster_url=os.getenv(
        "WEAVIATE_CLUSTER_URL",
        "https://1dvjlomnrl2fngujw2flkw.c0.asia-southeast1.gcp.weaviate.cloud",
    ),
    auth_credentials=AuthApiKey(os.environ["WEAVIATE_API_KEY"]),
)
# Vector store backed by the "DataSetDemo" collection; raw document text lives
# under the "text" property and queries are embedded with OpenAI's
# text-embedding-3-small model.
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
db = WeaviateVectorStore(
    client=client,
    index_name="DataSetDemo",
    text_key="text",
    embedding=embedding_model,
)
# 2. Expose the store as a retriever. Both the search strategy and its
#    arguments are declared configurable, so individual calls can override
#    them at runtime through with_config(configurable={...}).
retriever = (
    db.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 10, "score_threshold": 0.5},
    )
    .configurable_fields(
        search_type=ConfigurableField(id="db_search_type"),
        search_kwargs=ConfigurableField(id="db_search_kwargs"),
    )
)

# 3. Run a retrieval, overriding the defaults configured above to use MMR
#    with k=4, and print every returned document.
# FIX: client.close() was unconditional but unprotected — if .invoke() raised
# (network error, bad config), the Weaviate connection leaked. try/finally
# guarantees the connection is released on every path.
try:
    similarity_docs = retriever.with_config(configurable={
        "db_search_type": "mmr",
        "db_search_kwargs": {"k": 4},
    }).invoke("关于应用的配置接口有哪些？")

    for doc in similarity_docs:
        print("========================================================")
        print(doc)
        print("========================================================")
finally:
    client.close()