import logging

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.core.vector_stores import VectorStoreQuery
from llama_index.core.vector_stores.types import VectorStoreQueryMode

from app.core.config import settings
from app.models import Document
from app.services.factory.embedding_factory import EmbeddingsFactory
from app.services.file_services import format_file_name_to_read
from app.utils.bool_utils import convert_bool
from app.utils.str_utils import format_filename

# Prefix for Milvus collection names; the knowledge-base ID is appended to it.
PREFIX_MILVUS_COLLECTION_NAME = 'aip_'

from pymilvus import MilvusClient
from llama_index.vector_stores.milvus import MilvusVectorStore

logger=logging.getLogger(__name__)

class MilvusVectorStoreSession:
    """Vector-store session for one knowledge base, backed by Milvus via LlamaIndex.

    Wraps a ``MilvusVectorStore`` plus a ``VectorStoreIndex`` view over it and
    exposes add / delete / query operations keyed by the application's
    ``Document`` model.
    """

    _vector_store: MilvusVectorStore
    _vector_store_index = None

    def __init__(self, kb_id, dimensions=1024, enable_sparse=False) -> None:
        """Create (or attach to) the Milvus collection for *kb_id*.

        :param kb_id: knowledge-base id; determines the collection name.
        :param dimensions: dense embedding dimensionality.
        :param enable_sparse: truthy value enabling sparse/hybrid retrieval.
        """
        # Normalize once so the same boolean drives both the sparse embedding
        # function and the store's enable_sparse flag. Previously the raw value
        # (possibly a string such as "false") was forwarded to MilvusVectorStore
        # while only the sparse-function branch used convert_bool.
        enable_sparse = convert_bool(enable_sparse)
        sparse_embedding_function = None
        if enable_sparse:
            sparse_embedding_function = EmbeddingsFactory.create('openai')
        # The embedding model must be configured before the store is created,
        # because MilvusVectorStore reads Settings.embed_model.
        self.initAIModel()
        self._vector_store = MilvusVectorStore(
            dim=dimensions,
            uri=settings.MILVUS_API_URL,
            token=f"{settings.MILVUS_API_TOKEN}",
            collection_name=self.get_milvus_collection_name(kb_id),
            # Never set overwrite=True: deleting a single document would then
            # drop the entire collection.
            overwrite=False,
            hybrid_ranker="RRFRanker",
            hybrid_ranker_params={"k": 60},
            # True -> sparse/hybrid retrieval (requires sparse_embedding_function);
            # False -> dense-only retrieval.
            enable_sparse=enable_sparse,
            sparse_embedding_function=sparse_embedding_function,
        )
        # Index view over the existing collection (no re-ingestion happens here).
        self._vector_store_index = VectorStoreIndex.from_vector_store(self._vector_store)

    def initAIModel(self):
        """Configure the global LlamaIndex LLM and embedding model."""
        from llama_index.core import Settings
        from llama_index.llms.openai import OpenAI
        Settings.llm = OpenAI(
            model=settings.OPENAI_MODEL,
            api_base=settings.OPENAI_API_BASE,
            api_key=settings.OPENAI_API_KEY,
            temperature=0.7)

        embedding_llm = EmbeddingsFactory.create('openai')
        Settings.embed_model = embedding_llm
        # Kept on the instance for direct embedding calls (e.g. manual queries).
        self.embedding_llm = embedding_llm

    def loadFileToSimpleDirectoryReader(self, file_path):
        """Load a single file and return its first parsed document.

        Only single-file processing is supported.
        """
        docs = SimpleDirectoryReader(input_files=[file_path]).load_data()
        return docs[0]

    def format_doc_id(self, kb_doc_id):
        """Build the custom doc_id stored in Milvus so deletion can target it."""
        return f'kb_doc_{kb_doc_id}'

    def addDoc(self, doc: Document):
        """Vectorize *doc*'s underlying file and insert it into the vector store."""
        file_path = format_file_name_to_read(sub_path=doc.sub_path)
        store_doc = self.loadFileToSimpleDirectoryReader(file_path)
        store_doc_id = self.format_doc_id(doc.id)
        store_doc.doc_id = store_doc_id
        logger.debug(f'ID= {doc.id} store_doc_id={store_doc_id}  的文件 {doc.description}[{doc.sub_path}],准备开始进行向量化  ')
        self._vector_store_index.insert(document=store_doc)
        logger.info(f'ID= {doc.id}  store_doc_id={store_doc_id} 的文件 {doc.description}[{doc.sub_path}],向量化完成  ')

    def delDoc(self, doc: Document):
        """Delete *doc*'s vectors from the store via its custom doc_id."""
        store_doc_id = self.format_doc_id(doc.id)
        logger.debug(f'ID= {doc.id} store_doc_id={store_doc_id} {doc.description}[{doc.sub_path}],准备删除向量化数据  ')
        self._vector_store_index.delete_ref_doc(ref_doc_id=store_doc_id)
        logger.info(f'ID= {doc.id}  store_doc_id={store_doc_id} 的文件 {doc.description}[{doc.sub_path}],删除向量化数据完成  ')

    def query(self, query_str: str):
        """Run a dense (default-mode) retrieval query against the knowledge base."""
        query_engine = self._vector_store_index.as_query_engine(
            vector_store_query_mode=VectorStoreQueryMode.DEFAULT)
        return query_engine.query(query_str)

    @staticmethod
    def get_milvus_collection_name(kb_id: int):
        """Return the collection name: fixed prefix + knowledge-base id."""
        return f'{PREFIX_MILVUS_COLLECTION_NAME}{kb_id}'

    @staticmethod
    def drop_collection(kb_id: int):
        """Drop the entire Milvus collection for *kb_id*, if it exists."""
        client = None
        try:
            client = MilvusClient(uri=settings.MILVUS_API_URL, token=settings.MILVUS_API_TOKEN)
            collname = MilvusVectorStoreSession.get_milvus_collection_name(kb_id)
            if client.has_collection(collname):
                client.drop_collection(collname)
        finally:
            if client:
                client.close()

class LlamaIndexVectorStoreMilvusSessions:
    """Registry/factory for ``MilvusVectorStoreSession`` objects, keyed by kb_id.

    NOTE(review): session caching is currently disabled — ``get_session``
    always builds a fresh session; ``_sessions`` is only ever emptied by
    ``remove_session``. Re-enable caching here once session reuse is safe.
    """

    # Shared across instances; maps kb_id -> cached session.
    # Fixed annotation: `map` is the builtin function, not a mapping type.
    _sessions: dict = {}

    def __init__(self):
        pass

    def remove_session(self, kb_id):
        """Drop any cached session for *kb_id* (no-op when absent)."""
        self._sessions.pop(kb_id, None)

    def get_session(self, kb_id, dimensions, enable_sparse) -> "MilvusVectorStoreSession":
        """Build and return a session for the knowledge base (caching disabled)."""
        return MilvusVectorStoreSession(kb_id, dimensions, convert_bool(enable_sparse))



# Module-level singleton registry shared by the application to obtain sessions.
livsmSessions = LlamaIndexVectorStoreMilvusSessions()
