import asyncio
import os
import shutil
import pickle
import hashlib

from langchain_community.vectorstores import Chroma
from langchain.schema import Document
from langchain_core.tools import BaseTool
from langchain_mcp_adapters.client import MultiServerMCPClient
from sington import EmbeddingsClient

# On-disk location of the persisted Chroma vector database.
PERSIST_DIR = "./chroma_tools_db"
# Fingerprint file stored alongside the DB; used to detect tool-set changes
# between runs so the index is only rebuilt when necessary.
HASH_FILE = os.path.join(PERSIST_DIR, "tools.hash")


async def main():
    """Build (or load) a persistent Chroma index over MCP tool descriptions,
    then run a demo similarity search against it.

    Flow:
      1. Connect to the MCP servers and fetch the tool list.
      2. Fingerprint the tool set (name + description) to detect changes.
      3. If the fingerprint matches the one stored on disk, load the existing
         Chroma database; otherwise rebuild it, persist it, and store the new
         fingerprint.
      4. Run a similarity search for a sample query and print the hits.
    """
    # 1. Initialize the MCP client and fetch the tool list from both servers.
    mcp_client = MultiServerMCPClient({
        "wms-scesrv": {
            "url": "http://47.102.147.217:28012/scesrv/sse",
            "transport": "sse",
            "headers": {"X-Auth-token": "eef7TvPm5XsZ31CJW7uKAQOvP6kL6RQp"}
        },
        "wms-wmssrv": {
            "url": "http://47.102.147.217:30052/wmssrv/sse",
            "transport": "sse",
            "headers": {"X-Auth-token": "eef7TvPm5XsZ31CJW7uKAQOvP6kL6RQp"}
        },
    })
    tools: list[BaseTool] = await mcp_client.get_tools()

    # 2. Fingerprint the current tool set (name + description).
    #    FIX: the original hashed pickle.dumps(...) output, which is not
    #    guaranteed byte-stable across Python versions / pickle protocols and
    #    could therefore trigger spurious rebuilds. A delimited plain-text
    #    encoding is deterministic. (First run after this change rebuilds once.)
    fingerprint_src = "\x1e".join(
        f"{tool.name}\x1f{tool.description or ''}" for tool in tools
    )
    current_hash = hashlib.md5(fingerprint_src.encode("utf-8")).hexdigest()

    # 3. Read the fingerprint stored by the previous run, if any.
    last_hash = None
    if os.path.exists(HASH_FILE):
        with open(HASH_FILE, "r") as f:
            last_hash = f.read().strip()

    # 4. Obtain the local embedding model (project-provided shared instance).
    embeddings = await EmbeddingsClient.get_instance()

    # 5. Either load the existing vector store or rebuild and persist it.
    if current_hash == last_hash and os.path.exists(PERSIST_DIR):
        # Tool set unchanged: load the persisted database directly. The
        # embedding function is still required so queries can be embedded.
        vectorstore = Chroma(
            embedding_function=embeddings,
            persist_directory=PERSIST_DIR
        )
        print("工具集未变化，直接加载本地 Chroma 数据库。")
    else:
        # Tool set changed: drop the old database, re-embed, and persist.
        if os.path.exists(PERSIST_DIR):
            shutil.rmtree(PERSIST_DIR)

        # Build one Document per tool.
        # FIX: the tool name is now also stored in metadata under "id" so the
        # result printing below can display it — previously metadata never
        # contained "id" (ids were only passed to Chroma separately), so the
        # loop always printed "unknown".
        ids = [tool.name for tool in tools]
        docs = [
            Document(
                page_content=tool.description or "",
                metadata={
                    "id": tool.name,
                    "args_schema": str(getattr(tool, "args_schema", None)),
                },
            )
            for tool in tools
        ]

        # Create the vector store from the documents and persist it to disk.
        vectorstore = Chroma.from_documents(
            documents=docs,
            embedding=embeddings,
            ids=ids,
            persist_directory=PERSIST_DIR
        )
        # NOTE(review): persist() is a no-op/deprecated on chromadb >= 0.4
        # (auto-persists when persist_directory is set) — kept for older
        # versions; confirm against the installed chromadb.
        vectorstore.persist()

        # Record the new fingerprint next to the database for the next run.
        os.makedirs(PERSIST_DIR, exist_ok=True)
        with open(HASH_FILE, "w") as f:
            f.write(current_hash)

        print("工具集已更新，重新构建并持久化 Chroma 数据库。")

    # 6. Similarity search: find the tools most relevant to the user query.
    user_query = "帮我关闭一下SO订单号为:A10038的订单号"
    results = vectorstore.similarity_search(user_query, k=5)

    # 7. Print the retrieved tools.
    for doc in results:
        print("=== id:", doc.metadata.get("id", "unknown"))
        print("metadata:", doc.metadata)
        print("page_content:", doc.page_content)
        print()


if __name__ == "__main__":
    # Script entry point: run the async main() to completion on a fresh event loop.
    asyncio.run(main())
