import os.path

import uuid
from langchain_ollama import OllamaEmbeddings
from werkzeug.datastructures import FileStorage

from owl_admin.ext import db
from owl_ai.domain.model.knowledge_entity import KnowledgeBaseEntity, KnowledgeFileEntity
from owl_ai.repository.knowledge_repository import KnowledgeRepository, KnowledgeFileRepository
from owl_ai.service.llm_model_service import LLMModelService
from owl_common.owl.config import CONFIG_CACHE
from owl_common.sqlalchemy.transaction import Transactional


class KnowledgeService:
    """Application-level service for knowledge-base records."""

    @classmethod
    @Transactional(db.session)
    def insert(cls, knowledge: KnowledgeBaseEntity):
        """
        Persist a new knowledge base.

        Args:
            knowledge: the knowledge-base entity to store

        Returns: the id of the newly created knowledge base
        """
        new_id = KnowledgeRepository.insert(knowledge)
        return new_id

    @classmethod
    @Transactional(db.session)
    def list(cls):
        """
        Fetch every knowledge base.

        Returns: list of all knowledge-base entities
        """
        records = KnowledgeRepository.list()
        return records

    @classmethod
    def find_by_id(cls, kid: int):
        """Look up a single knowledge base by its primary key."""
        return KnowledgeRepository.find_by_id(kid)


class VectorStoreService:
    """
    Vector-store service: embeds text chunks and reads/writes the vector database.
    """

    @classmethod
    def vector_store_db(cls, embedding_client, collection_name, vector_store_strategy: dict):
        """
        Build a vector-store client according to the configured strategy.

        Args:
            embedding_client: embedding model used to vectorize texts
            collection_name: name of the target collection
            vector_store_strategy: config dict; must contain "type", plus
                backend-specific keys ("uri", "token" for milvus)

        Returns: the vector-store client, or None for an unsupported type
        """
        vector_type = vector_store_strategy.get("type")
        if vector_type == 'milvus':
            # Imported lazily so the milvus dependency is only required when
            # a milvus store is actually configured.
            from langchain_community.vectorstores import Milvus
            uri = vector_store_strategy.get("uri")
            token = vector_store_strategy.get("token")
            vector_db = Milvus(embedding_function=embedding_client,
                               collection_name=collection_name,
                               metadata_field="metadata",
                               primary_field="id",
                               connection_args={
                                   "uri": uri,
                                   "token": token,
                                   "db_name": "knowledge_base"
                               },
                               index_params={
                                   "index_type": "IVF_FLAT",
                                   "metric_type": "L2",
                               },
                               consistency_level="Strong",
                               drop_old=False)
            return vector_db
        # Unsupported vector store type: make the None result explicit.
        return None

    @classmethod
    def similarity_search(cls, knowledge_base: KnowledgeBaseEntity, query: str, top_k: int = 5):
        """
        Run a similarity search against a knowledge base's vector collection.

        Args:
            knowledge_base: knowledge base carrying the vector-store and
                embedding configuration
            query: query text
            top_k: number of nearest chunks to return (default 5)

        Returns: list of matching documents, or None for an unsupported
            vector-store type
        """
        vector_store_strategy = knowledge_base.vector_store
        embedding_strategy = knowledge_base.embedding_strategy
        embedding_client = cls.embedding_model(embedding_strategy)
        vector_type = vector_store_strategy.get("type")
        if vector_type == 'milvus':
            vector_db = cls.vector_store_db(embedding_client=embedding_client,
                                            collection_name="vector_db_" + str(knowledge_base.id),
                                            vector_store_strategy=vector_store_strategy)
            search_ret = vector_db.similarity_search(query=query, k=int(top_k))
            return search_ret
        return None

    @classmethod
    def embedding_model(cls, embedding_strategy: dict):
        """
        Build the embedding client described by the embedding strategy.

        Args:
            embedding_strategy: config dict; "model_id" selects the LLM model
                record that holds the api type/host/model name

        Returns: the embedding client, or None when the api type is unsupported
        """
        embedding_model_id = embedding_strategy.get("model_id")
        embedding_model = LLMModelService.find_llm_model_by_id(embedding_model_id)

        api_type = embedding_model.api_type

        embedding_client = None
        if api_type == 'ollama':
            api_host = embedding_model.api_host
            model_name = embedding_model.model_name
            embedding_client = OllamaEmbeddings(
                base_url=api_host,
                model=model_name
            )

        return embedding_client

    @classmethod
    def embedding_docs_save(cls, knowledge_base: KnowledgeBaseEntity, file_record: KnowledgeFileEntity, docs: list):
        """
        Embed text chunks and write them into the knowledge base's collection.

        Args:
            knowledge_base: knowledge base carrying vector-store and
                embedding configuration
            file_record: source file record; its id/name are stored as
                per-chunk metadata so chunks can be traced back to the file
            docs: list of text chunks to embed and store

        Returns: ids assigned by the vector store, or None for an unsupported
            vector-store type
        """
        vector_store_strategy = knowledge_base.vector_store
        embedding_strategy = knowledge_base.embedding_strategy

        embedding_client = cls.embedding_model(embedding_strategy)

        vector_type = vector_store_strategy.get("type")

        if vector_type == 'milvus':
            vector_db = cls.vector_store_db(embedding_client=embedding_client,
                                            collection_name="vector_db_" + str(knowledge_base.id),
                                            vector_store_strategy=vector_store_strategy)

            # One random id and one identical metadata record per chunk.
            uuids = [str(uuid.uuid4()) for _ in range(len(docs))]
            metadata = [{"file_id": file_record.id, "file_name": file_record.file_name} for _ in docs]
            ids = vector_db.add_texts(texts=docs, ids=uuids, metadatas=metadata)
            return ids
        return None


class KnowledgeFileService:
    """
    Knowledge-base file service: uploads files, records them in the DB,
    and loads/splits their contents for embedding.
    """

    # Upload directory for knowledge files, read once from the config cache.
    knowledge_upload_dir = CONFIG_CACHE['knowledge.upload_dir']

    @classmethod
    def _file_suffix(cls, file_name: str) -> str:
        """Return the extension after the last dot, or '' when there is none.

        Uses rsplit so names with several dots (e.g. "report.final.pdf")
        keep only the final extension; the previous `_, s = name.split(".")`
        raised ValueError on such names and on names without a dot.
        """
        if "." not in file_name:
            return ""
        return file_name.rsplit(".", 1)[1]

    @classmethod
    def upload_to_disk(cls, file: FileStorage):
        """
        Save an uploaded file to disk under a random name.

        Args:
            file: the uploaded file

        Returns: (disk_name, file_path) — the generated on-disk name and its
            full path inside the upload directory
        """
        file_name = file.filename
        # Random disk name preserves only the original extension (if any),
        # avoiding collisions and path-traversal via the client filename.
        suffix = cls._file_suffix(file_name)
        disk_name = str(uuid.uuid4()) + ('.' + suffix if suffix else '')
        file_path = cls.get_file_path(disk_name)

        # Persist to disk
        file.save(dst=file_path)

        return disk_name, file_path

    @classmethod
    @Transactional(db.session)
    def save_db_record(cls, kid, disk_name, file):
        """
        Insert the DB record for an uploaded knowledge file.

        Args:
            kid: id of the owning knowledge base
            disk_name: name the file was stored under on disk
            file: the uploaded file (its original name and extension are kept)

        Returns: the persisted KnowledgeFileEntity with its new id set
        """
        file_name = file.filename
        knowledge_file_entity = KnowledgeFileEntity()
        knowledge_file_entity.kid = kid
        knowledge_file_entity.file_name = file_name
        knowledge_file_entity.disk_name = disk_name
        knowledge_file_entity.file_type = cls._file_suffix(file_name)
        file_id = KnowledgeFileRepository.insert(knowledge_file_entity)

        knowledge_file_entity.id = file_id
        return knowledge_file_entity

    @classmethod
    def get_file_path(cls, filename: str):
        """Return the path of *filename* inside the upload directory."""
        return os.path.join(cls.knowledge_upload_dir, filename)

    @classmethod
    def split_file(cls, documents, chunk_strategy):
        """
        Split loaded document text into chunks per the chunk strategy.

        Args:
            documents: full document text to split
            chunk_strategy: config dict; "type" selects the splitter, plus
                splitter-specific keys (chunk_size/chunk_overlap/separator)

        Returns: list of text chunks, or None for an unsupported split type
        """
        split_type = chunk_strategy.get("type")

        if split_type == "CharacterTextSplitter":
            from langchain_text_splitters import CharacterTextSplitter
            chunk_size = chunk_strategy.get("chunk_size")
            chunk_overlap = chunk_strategy.get("chunk_overlap")
            separator = chunk_strategy.get("separator")
            text_splitter = CharacterTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separator=separator
            )
            return text_splitter.split_text(documents)
        return None

    @classmethod
    def load_file(cls, file_path):
        """
        Load a file from disk and return its text content.

        Args:
            file_path: path of the file to load; the extension selects the loader

        Returns: the concatenated page text, or None for an unsupported type
        """
        suffix = cls._file_suffix(file_path)
        if suffix == 'pdf':
            from langchain_community.document_loaders import PyPDFLoader
            loader = PyPDFLoader(file_path)
            docs = loader.load()
            # Concatenate all page contents into one string.
            return "".join(doc.page_content for doc in docs)
        return None

    @classmethod
    def list(cls, kid):
        """Return all file records belonging to knowledge base *kid*."""
        return KnowledgeFileRepository.list(kid)