import hashlib
import os

import chromadb
import torch
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer

from utils.document_loader import DocumentLoader


class KnowledgeBuilder:
    """Build and query a persistent, ChromaDB-backed knowledge base.

    Documents are chunked with a recursive character splitter, embedded with
    the BAAI/bge-m3 sentence-transformer model, and stored in an on-disk
    Chroma collection that can be searched semantically.
    """

    def __init__(self, persist_dir="./knowledge_base/vector_db"):
        """Initialize the embedding model, text splitter, and vector store.

        Args:
            persist_dir: Directory where the Chroma database is persisted.
        """
        # Use the GPU when available; bge-m3 inference is far faster on CUDA.
        model = SentenceTransformer(
            "BAAI/bge-m3",
            device="cuda" if torch.cuda.is_available() else "cpu"
        )

        # Adapter exposing the LangChain Embeddings interface
        # (embed_documents / embed_query) on top of SentenceTransformer.
        # The original code stored a one-off encode() result of a dummy
        # string here (with a wrong `texts=` kwarg), which is not a usable
        # embedding function.
        class _STEmbeddings:
            def __init__(self, st_model):
                self._model = st_model

            def embed_documents(self, texts):
                # normalize_embeddings=True yields unit vectors suitable
                # for cosine similarity.
                return self._model.encode(
                    texts,
                    normalize_embeddings=True,
                    batch_size=32,
                    show_progress_bar=False,
                ).tolist()

            def embed_query(self, text):
                return self.embed_documents([text])[0]

        self.embeddings = _STEmbeddings(model)

        # Text splitter: 500-char chunks with 50-char overlap so context
        # straddling a chunk boundary is not lost.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50
        )

        # Create/load the on-disk ChromaDB collection.
        os.makedirs(persist_dir, exist_ok=True)
        self.persist_dir = persist_dir
        self.client = chromadb.PersistentClient(path=persist_dir)
        self.collection = self.client.get_or_create_collection("business_knowledge")
        self.vector_db = Chroma(
            client=self.client,
            collection_name="business_knowledge",
            # Fixed: was `self.embedding`, an attribute that was never
            # defined (AttributeError at construction).
            embedding_function=self.embeddings,
            persist_directory=persist_dir
        )

    def build_from_web(self):
        """Build the knowledge base from web-scraped data."""
        # Load e-commerce documents.
        loader = DocumentLoader()
        ecommerce_docs = loader.fetch_ecommerce_docs()

        # Load finance policy documents.
        finance_docs = loader.fetch_finance_policies()

        # Split each raw document into chunks, carrying the source along
        # as metadata.
        all_docs = []
        for doc in ecommerce_docs + finance_docs:
            splits = self.text_splitter.create_documents(
                [doc["content"]],
                [{"source": doc["source"]}]
            )
            all_docs.extend(splits)

        # Add everything to the vector database.
        self.update_knowledge(all_docs)

    def update_knowledge(self, documents):
        """Incrementally add document chunks to the knowledge base.

        Args:
            documents: LangChain ``Document`` objects exposing
                ``page_content`` and ``metadata``.
        """
        # Content-derived ids: sha256 is stable across processes, unlike the
        # builtin hash(), which is randomized per run (PYTHONHASHSEED) and
        # would defeat id-based deduplication between runs.
        ids = [
            f"doc_{hashlib.sha256(doc.page_content.encode('utf-8')).hexdigest()}"
            for doc in documents
        ]
        metadatas = [doc.metadata for doc in documents]
        contents = [doc.page_content for doc in documents]

        # Embed explicitly with the same bge-m3 model used at query time.
        # Without this, the raw chromadb collection would fall back to its
        # default embedding function, and queries would search a mismatched
        # vector space.
        vectors = self.embeddings.embed_documents(contents)

        # Batched inserts to bound memory usage.
        batch_size = 100
        for i in range(0, len(ids), batch_size):
            self.collection.add(
                ids=ids[i:i + batch_size],
                documents=contents[i:i + batch_size],
                metadatas=metadatas[i:i + batch_size],
                embeddings=vectors[i:i + batch_size],
            )

        # Older langchain Chroma versions require an explicit persist();
        # newer PersistentClient-backed versions persist automatically and
        # no longer expose the method.
        if hasattr(self.vector_db, "persist"):
            self.vector_db.persist()
        print(f"知识库已更新，新增 {len(documents)} 个文档片段")

    def search(self, query, k=5):
        """Run a semantic similarity search and return the top-k Documents."""
        return self.vector_db.similarity_search(query, k=k)


# Smoke test: build the knowledge base from the web, then run one search.
if __name__ == "__main__":
    kb = KnowledgeBuilder()
    kb.build_from_web()
    results = kb.search("退货政策")
    # Only index into results when the search returned something; the
    # original unconditionally read results[0] and raised IndexError on
    # an empty result set.
    if results:
        print("搜索示例结果:", results[0].page_content[:100] + "...")
