import os.path
from uuid import uuid4

from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import Milvus
from langchain_experimental.text_splitter import SemanticChunker
from langchain_ollama import OllamaEmbeddings
from pymilvus import connections, db as milvus_db

from owl_admin.ext import db
from owl_ai.domain.rag_entity import KnowledgeBase
from owl_ai.mapper.rag_knowledge_base_mapper import KnowledgeBaseMapper
from owl_common.sqlalchemy.transaction import Transactional


class RAGKBService:
    """
    RAG knowledge-base service: persistence of knowledge-base records and
    document splitting for ingestion.
    """

    @classmethod
    @Transactional(db.session)
    def create_knowledge_base(cls, knowledge_base: KnowledgeBase):
        """Persist a new knowledge base and return its generated id."""
        knowledge_id = KnowledgeBaseMapper.insert_one(knowledge_base=knowledge_base)
        return knowledge_id

    @classmethod
    @Transactional(db.session)
    def find_by_id(cls, knowledge_id: int):
        """Look up a knowledge-base record by its primary key."""
        knowledge_base = KnowledgeBaseMapper.find_by_id(knowledge_id)
        return knowledge_base

    @classmethod
    def pdf_file_split(cls, file_path: str):
        """
        Load a PDF and split it into semantically coherent chunks.

        Args:
            file_path: path to the PDF file on disk.

        Returns:
            List of langchain Document chunks produced by the semantic splitter.
        """
        pdf_loader = PyPDFLoader(file_path, mode="single")
        pages = [page.page_content for page in pdf_loader.lazy_load()]

        # Semantic chunking: breakpoints are placed where the embedding
        # distance between adjacent sentences exceeds the percentile threshold.
        text_splitter = SemanticChunker(
            RAGEmbeddingService.embeddings_model, breakpoint_threshold_type="percentile"
        )

        documents = text_splitter.create_documents(pages)
        return documents

    @classmethod
    def file_split(cls, file_path: str):
        """
        Split a file into chunks, dispatching on its file extension.

        Args:
            file_path: path to the file.

        Returns:
            List of document chunks.

        Raises:
            ValueError: if the file type is not supported.
        """
        # Derive the suffix from the path instead of hard-coding "pdf",
        # so the dispatch below actually depends on the input file.
        suffix = os.path.splitext(file_path)[1].lstrip(".").lower()

        # Handle PDF files
        if suffix == "pdf":
            return cls.pdf_file_split(file_path)
        raise ValueError(f"Unsupported file type: {suffix!r}")


class FileSplitService:
    """
    File splitting service: loads files of various types and splits them
    into documents for downstream embedding.
    """

    @classmethod
    def load_file(cls, file):
        """
        Load a file-like object into a list of pages.

        Args:
            file: file-like object exposing a ``name`` attribute.

        Returns:
            List of loaded page objects (empty for types whose loaders are
            not implemented yet).

        Raises:
            Exception: if the file type is not supported.
        """
        file_name = file.name
        # Bug fix: os.path.split() yields (dirname, basename), so ``suffix``
        # was never equal to 'pdf'. splitext() isolates the extension; strip
        # the leading dot and lowercase so "Doc.PDF" is recognized as "pdf".
        suffix = os.path.splitext(file_name)[1].lstrip(".").lower()

        pages = []
        if suffix == 'pdf':
            pdf_loader = PyPDFLoader(file)
            for page in pdf_loader.lazy_load():
                pages.append(page)
        elif suffix in ('txt', 'docx', 'doc', 'xlsx', 'xls'):
            # TODO: loaders for these file types are not implemented yet.
            pass
        else:
            raise Exception('不支持的文件类型')
        # Bug fix: the original fell off the end, so callers always got None.
        return pages

    @classmethod
    def split_documents(cls, file):
        """
        Split a file into documents.

        Args:
            file: file to split.

        Returns:
            List of split documents (chunking itself is not implemented yet).
        """

        # Load the raw pages
        pages = cls.load_file(file)

        # TODO: chunking of ``pages`` and embedding are not implemented yet.
        documents = []
        return documents


class RAGEmbeddingService:
    """
    Embedding / vector-store service backed by an Ollama bge-m3 model and a
    Milvus instance.
    """

    # Shared Milvus connection settings (previously duplicated in both methods).
    _MILVUS_URI = "http://192.168.20.100:19530"
    _MILVUS_TOKEN = "root:Milvus"
    _COLLECTION_NAME = "knowledge_base_test"

    # NOTE(review): both attributes are initialized at import time and reach
    # out to the network — consider lazy initialization; confirm with owners.
    embeddings_model = OllamaEmbeddings(base_url="http://192.168.21.9:11434",
                                        model="bge-m3:567m")
    milvus_conn = connections.connect(host="192.168.20.100", port=19530)

    @classmethod
    def _vectorstore(cls, db_name: str):
        """Build the Milvus vector store bound to ``db_name`` (shared helper)."""
        return Milvus(
            embedding_function=cls.embeddings_model,
            collection_name=cls._COLLECTION_NAME,
            connection_args={"uri": cls._MILVUS_URI, "token": cls._MILVUS_TOKEN, "db_name": db_name},
            index_params={"index_type": "IVF_FLAT", "metric_type": "L2"},
            consistency_level="Strong",
            drop_old=False,  # set to True if seeking to drop the collection with that name if it exists
        )

    @classmethod
    def document_embedding_save(cls, db_name: str, documents: list):
        """
        Embed ``documents`` and store them in the Milvus database ``db_name``.

        Args:
            db_name: Milvus database name; created on first use.
            documents: langchain documents to embed and insert.

        Returns:
            List of ids assigned to the inserted documents.
        """
        # Create the target database if it does not exist yet.
        if db_name not in milvus_db.list_database():
            milvus_db.create_database(db_name)

        vectorstore = cls._vectorstore(db_name)

        # One random, collision-free id per document.
        uuids = [str(uuid4()) for _ in range(len(documents))]

        ids = vectorstore.add_documents(documents=documents, ids=uuids)
        return ids

    @classmethod
    def document_embedding_search(cls, db_name: str, query: str):
        """
        Return the 5 documents most similar to ``query`` (L2 distance).

        Args:
            db_name: Milvus database to search in.
            query: natural-language query text.

        Returns:
            List of the top-5 nearest documents.
        """
        vectorstore = cls._vectorstore(db_name)
        documents = vectorstore.similarity_search(query=query, k=5)
        return documents
