import os

import dotenv
import weaviate
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_weaviate import WeaviateVectorStore
from weaviate.auth import AuthApiKey

### Full RAG pipeline: split --> store --> retrieve.
dotenv.load_dotenv()

# 1. Load the Markdown document into a list of Documents.
loader = UnstructuredMarkdownLoader("./project_api.md")
documents = loader.load()

# 2. Build the splitter.
#    The separator list mixes plain strings with regex alternations for
#    Chinese/English sentence punctuation, so regex mode must be enabled:
#    without is_separator_regex=True the splitter escapes every separator
#    and e.g. "。|！|？" would only match that literal 5-character string
#    (i.e. those separators were silently dead in the original).
text_splitter = RecursiveCharacterTextSplitter(
    separators=[
        "\n\n",
        "\n",
        "。|！|？",            # Chinese sentence-ending punctuation
        r"\.\s|\!\s|\?\s",     # English sentence endings usually followed by a space
        r"；|;\s",             # semicolons (full- and half-width)
        r"，|,\s",             # commas (full- and half-width)
        " ",
        "",
    ],
    is_separator_regex=True,
    chunk_size=500,
    chunk_overlap=50,
    add_start_index=True,
)

# 3. Split the documents into chunks.
chunks = text_splitter.split_documents(documents)

# 4. Connect to Weaviate Cloud.
#    SECURITY NOTE(review): the API key was hard-coded in source; it should be
#    rotated and supplied via the .env file loaded above. The literal is kept
#    only as a fallback so existing deployments keep working.
weaviate_api_key = os.getenv(
    "WEAVIATE_API_KEY",
    "QlJmcUxyamFKdElveHl0Zl82QzBuMnF0YnV2UUFNdm5xV0JRTDAzZlRsbWorbmtyRzJEblZQbVZ6QXhNPV92MjAw",
)
# connect_to_wcs() is deprecated in weaviate-client v4; connect_to_weaviate_cloud()
# takes the same arguments.
client = weaviate.connect_to_weaviate_cloud(
    cluster_url="https://1dvjlomnrl2fngujw2flkw.c0.asia-southeast1.gcp.weaviate.cloud",
    auth_credentials=AuthApiKey(weaviate_api_key),
)

try:
    # 5. Store the chunks in the vector database (embedding on ingest).
    db = WeaviateVectorStore(
        client=client,
        index_name="DataSetDemo",
        text_key="text",
        embedding=OpenAIEmbeddings(model="text-embedding-3-small"),
    )
    db.add_documents(chunks)

    # 6. Turn the store into a retriever.
    retriever = db.as_retriever()

    # 7. Run a retrieval query.
    docs = retriever.invoke("什么是llmops")
    for doc in docs:
        print("========================================================")
        print(doc)
        print("========================================================")
finally:
    # Always release the Weaviate connection, even if ingest/retrieval fails.
    client.close()