from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core.node_parser import SentenceSplitter
# from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from .embedding import SiliconFlowEmbedding
import chromadb
import os
from llama_index.llms.deepseek import DeepSeek
# from
from datetime import datetime
from ..models import DocumentMetadata

class DocumentProcessor:
    """Index documents into a Chroma vector store and expose a query engine.

    Pipeline: load files from a directory, extract per-document metadata
    (optionally persisted to SQLite through a SQLAlchemy-style session),
    split text into sentence chunks, embed them, and build a LlamaIndex
    vector index backed by a persistent Chroma collection.
    """

    def __init__(self, chroma_path, embed_api_key=None, llm_api_key=None):
        """Initialize the embedding model, Chroma store and text splitter.

        Args:
            chroma_path: Filesystem path for the persistent Chroma database.
            embed_api_key: SiliconFlow API key. Defaults to the
                SILICONFLOW_API_KEY environment variable.
            llm_api_key: DeepSeek API key. Defaults to the
                DEEPSEEK_API_KEY environment variable.
        """
        # SECURITY: these fallback keys were previously hard-coded in source
        # and are kept only for backward compatibility. They are committed to
        # version control and must be rotated; prefer the constructor args or
        # environment variables.
        self._embed_api_key = (
            embed_api_key
            or os.environ.get("SILICONFLOW_API_KEY")
            or "Bearer sk-ngekgvpblpqccnltgqydfzdxsktivlwmaoyjdfklsdjsadtz"  # FIXME: rotate
        )
        self._llm_api_key = (
            llm_api_key
            or os.environ.get("DEEPSEEK_API_KEY")
            or "sk-7dfdb92a297e47d498ad94ca385ab51c"  # FIXME: rotate
        )

        # Embedding model used both at indexing and at query time.
        self.embed_model = SiliconFlowEmbedding(
            model_name="netease-youdao/bce-embedding-base_v1",
            api_key=self._embed_api_key,
        )

        # Persistent Chroma client; all chunks live in one collection.
        self.chroma_client = chromadb.PersistentClient(path=chroma_path)
        self.vector_store = ChromaVectorStore(
            chroma_collection=self.chroma_client.get_or_create_collection("rag_docs")
        )

        # SQLite session for metadata persistence. Left as None until a real
        # session factory is wired in; _store_metadata degrades to a no-op in
        # that case instead of raising AttributeError (previous behavior:
        # self.db_session was never set, so every call crashed).
        self.db_session = None

        # Sentence-level splitter: small chunks with generous overlap to keep
        # embedding context tight while preserving continuity across chunks.
        self.splitter = SentenceSplitter(
            chunk_size=128,
            chunk_overlap=50,
            separator=" ",
            paragraph_separator="\n\n",
        )

    def process_documents(self, input_dir):
        """Load, record, split, embed and index all documents in a directory.

        Args:
            input_dir: Directory scanned by SimpleDirectoryReader.

        Returns:
            A VectorStoreIndex built over the split nodes, backed by the
            persistent Chroma vector store.
        """
        reader = SimpleDirectoryReader(input_dir)
        documents = reader.load_data()
        print("Loaded documents:", len(documents))

        # Record per-document metadata before splitting (splitting discards
        # the document-level view).
        for doc in documents:
            self._store_metadata(self._extract_metadata(doc))

        # Split documents into embedding-sized nodes.
        nodes = self.splitter.get_nodes_from_documents(documents)

        storage_context = StorageContext.from_defaults(
            vector_store=self.vector_store
        )

        # Build the index; embeddings are computed with our model rather
        # than the llama_index default.
        return VectorStoreIndex(
            nodes,
            storage_context=storage_context,
            embed_model=self.embed_model,
        )

    def _extract_metadata(self, document):
        """Extract and enrich metadata for one loaded document.

        Returns a plain dict mirroring the DocumentMetadata columns.
        """
        file_path = document.metadata.get("file_path", "")
        file_name = os.path.basename(file_path)
        # Guard: readers may not populate file_path (e.g. in-memory docs);
        # os.path.getsize would raise on an empty or missing path.
        file_size = (
            os.path.getsize(file_path)
            if file_path and os.path.exists(file_path)
            else 0
        )

        text = document.text
        return {
            "doc_id": document.doc_id,
            "file_name": file_name,
            "file_type": os.path.splitext(file_name)[1].lower(),
            "file_size": file_size,
            # NOTE(review): page_label is a single page identifier, not a
            # count — confirm intended semantics against the reader output.
            "page_count": document.metadata.get("page_label", 1),
            "created_at": datetime.now().isoformat(),
            "additional_metadata": document.metadata,
            "summary": text[:200] + "..." if len(text) > 200 else text,
            "text_preview": text[:1000],  # partial text kept for previews
        }

    def _store_metadata(self, metadata):
        """Persist one metadata dict to SQLite via the configured session.

        No-op when no session is configured (db_session is None); previously
        this raised AttributeError because the session was never initialized.
        """
        session = getattr(self, "db_session", None)
        if session is None:
            return

        record = DocumentMetadata(
            doc_id=metadata["doc_id"],
            file_name=metadata["file_name"],
            file_type=metadata["file_type"],
            file_size=metadata["file_size"],
            page_count=metadata["page_count"],
            created_at=metadata["created_at"],
            additional_metadata=metadata["additional_metadata"],
            summary=metadata["summary"],
            text_preview=metadata["text_preview"],
        )
        session.add(record)
        session.commit()

    def get_query_engine(self):
        """Return a compact-mode query engine over the persisted index.

        Rebuilds the index view from the existing vector store (no
        re-embedding of stored documents) and answers with DeepSeek.
        """
        index = VectorStoreIndex.from_vector_store(
            self.vector_store,
            embed_model=self.embed_model,
        )
        llm = DeepSeek(model="deepseek-chat", api_key=self._llm_api_key)
        return index.as_query_engine(
            similarity_top_k=3,
            response_mode="compact",
            llm=llm,
            verbose=True,
        )