import datetime  # 用于处理时间警告
import os

import dotenv
import weaviate
from langchain_community.embeddings.baidu_qianfan_endpoint import QianfanEmbeddingsEndpoint
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_unstructured import UnstructuredLoader  # 替换弃用的加载器
from langchain_weaviate import WeaviateVectorStore
from weaviate.auth import AuthApiKey

# Pull settings from .env into the process environment (e.g. the
# QIANFAN_AK / QIANFAN_SK credentials the embedder reads implicitly).
dotenv.load_dotenv()

# 1. Build the document loader. UnstructuredLoader replaces the
#    deprecated UnstructuredMarkdownLoader.
source_path = "./项目API文档.md"
loader = UnstructuredLoader(source_path)

# Separator patterns, tried in order from coarsest to finest:
# paragraph break, line break, CJK sentence enders, Western sentence
# enders, semicolons, commas, single space, and finally any character.
# All entries are regexes (is_separator_regex=True); raw strings avoid
# invalid-escape warnings.
_SEPARATOR_PATTERNS = [
    r"\n\n",
    r"\n",
    r"。|！|？",
    r"\.\s|\!\s|\?\s",
    r"；|;\s",
    r"，|,\s",
    r" ",
    r"",
]

# Recursive splitter: ~500-char chunks with a 50-char overlap;
# add_start_index records each chunk's offset in the source document.
text_splitter = RecursiveCharacterTextSplitter(
    separators=_SEPARATOR_PATTERNS,
    is_separator_regex=True,
    chunk_size=500,
    chunk_overlap=50,
    add_start_index=True,
)

# 2. Load the markdown file, split it into chunks, and prepare the
#    Baidu Qianfan embedding model (credentials come from the env).
documents = loader.load()
chunks = text_splitter.split_documents(documents)
embedding = QianfanEmbeddingsEndpoint()

# 3. Connect to Weaviate Cloud (v4 API: connect_to_weaviate_cloud
#    replaces the deprecated weaviate.Client constructor).
#
# SECURITY: credentials belong in the environment (.env is loaded
# above), never in source control. The inline key below is kept only
# as a backward-compatible fallback — it is already exposed in this
# file's history, so rotate it in the Weaviate console and then delete
# the fallback values, leaving os.environ[...] lookups only.
client = weaviate.connect_to_weaviate_cloud(
    cluster_url=os.getenv(
        "WEAVIATE_URL",
        "https://zabwh0mbt4errmvpknamq.c0.asia-southeast1.gcp.weaviate.cloud",
    ),
    auth_credentials=AuthApiKey(
        os.getenv(
            "WEAVIATE_API_KEY",
            "b2o4OGQxcmptMTZEWmJ5VV9udE5xSXBzQW04dUlDZ0JSS0d1ay9FQlhXdEtyMDR4OUFVNzc0eG9mU3dnPV92MjAw",
        )
    ),
)

try:
    # Vector store backed by the "myleane" collection; raw chunk text
    # lives under the "text" property and is embedded via Qianfan.
    vector_store = WeaviateVectorStore(
        client=client,
        index_name="myleane",
        text_key="text",
        embedding=embedding,
    )
    vector_store.add_documents(chunks)

    # 4. Expose the store as a retriever: top-10 hits, filtered by a
    #    minimum similarity score of 0.5.
    retriever = vector_store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 10, "score_threshold": 0.5},
    )

    # 5. Run the query and print a 50-char preview of every hit plus
    #    the hit count.
    retrieved_docs = retriever.invoke("关于配置接口的信息有哪些")
    previews = [doc.page_content[:50] for doc in retrieved_docs]
    print(previews)
    print(len(retrieved_docs))

finally:
    # Always release the connection so the v4 client does not warn
    # about leaked resources.
    client.close()