from typing import Optional, Literal, List

from langchain_core.documents import Document

from ai_engine.core.model.base import InsertDocumentReq
from ai_engine.core.model.chat import CompletionRequest
from ai_engine.core.model.vector_doc import VectorInsertOrUpdateRequest, VectorRemoveRequest
from ai_engine.core.vectorstore.client.vector_store_client import VectorType
from ai_engine.core.vectorstore.vector_data.BaseVectorAdapter import BaseVectorAdapter


def split_text_into_chunks(text, chunk_size):
    """
    Splits the input text into chunks of at most ``chunk_size`` characters.

    The text is split on newlines and lines are packed greedily into chunks;
    a single line longer than ``chunk_size`` is hard-split. Note that the
    newline separators themselves are NOT preserved in the output (lines are
    concatenated directly), mirroring the original behavior.

    Parameters:
    - text (str): The input text to be split.
    - chunk_size (int): The maximum length of each chunk.

    Returns:
    - list: A list containing the text chunks; no text is lost
      ("".join(result) == text with newlines removed).
    """
    if len(text) <= chunk_size:
        return [text]
    chunks: List[str] = []
    chunk: str = ""
    for line in text.split('\n'):
        # Flush the current chunk if appending this line would exceed the
        # limit (bugfix: the limit was hard-coded to 8000 and the
        # overflowing line was silently dropped).
        if chunk and len(chunk) + len(line) > chunk_size:
            chunks.append(chunk)
            chunk = ""
        # Hard-split a single line that is longer than chunk_size on its own.
        while len(line) > chunk_size:
            chunks.append(line[:chunk_size])
            line = line[chunk_size:]
        chunk += line
    if chunk:
        chunks.append(chunk)
    return chunks


class KnowledgeVectorAdapter(BaseVectorAdapter):
    """Knowledge-base adapter for vector database operations.

    Translates knowledge-base insert/search/delete requests into the query
    payloads expected by the concrete vector store (Elasticsearch,
    OpenSearch or Milvus).
    """

    # Per-api_type length caps for (vectorText, originalText).
    # id and categories are capped at 32 characters for every api_type.
    _TEXT_LIMITS = {
        "openai": (10240, 10240),
        "azure": (10240, 10240),
        "volcengineArk": (10240, 10240),
        "qianfan": (384, 450),
    }

    # Maximum text length sent to the volcengineArk embedding model;
    # longer vectorText is split into chunks of at most this size.
    _ARK_MAX_TEXT = 4000

    @classmethod
    def chat_insert_validata(cls,
                             api_type: str,
                             requests: List[VectorInsertOrUpdateRequest]) -> None:
        """Validate request field lengths against the api_type's limits.

        :param api_type: model provider ("openai", "azure", "volcengineArk"
            or "qianfan")
        :param requests: insert/update payloads to validate
        :raises ValueError: on an unknown api_type or any out-of-range field
        """
        # Fix: an unknown api_type now fails even when ``requests`` is empty
        # (previously the check only ran inside the per-request loop).
        try:
            vector_max, original_max = cls._TEXT_LIMITS[api_type]
        except KeyError:
            raise ValueError("Invalid api type") from None
        for request in requests:
            if not 1 <= len(request.id) <= 32:
                raise ValueError("id error, required and greater than 1 and less than 32 characters.")
            if not 1 <= len(request.categories) <= 32:
                raise ValueError("categories error, required and less than 32 characters.")
            if not 1 <= len(request.vectorText) <= vector_max:
                raise ValueError(f"vectorText error, required and greater than 1 and less than {vector_max} characters.")
            if not 1 <= len(request.originalText) <= original_max:
                raise ValueError(f"originalText error, required and greater than 1 and less than {original_max} characters.")

    @classmethod
    def insert_adapter(cls,
                       api_type: str,
                       requests: List[VectorInsertOrUpdateRequest],
                       vector_type: Optional[VectorType] = None) -> InsertDocumentReq | None:
        """Build the insert payload for the target vector database.

        Constructs the delete-by-id query (to replace existing entries) and
        the Document entities to index.

        :param api_type: model provider type
        :param requests: insert/update payloads
        :param vector_type: target vector database type
        :return: insert request (query kwargs + documents), or ``None``
            when ``requests`` is empty
        :raises Exception: on an unknown ``vector_type``
        """
        docs: List[Document] = []
        ids: List[str] = []
        for request in requests:
            ids.append(request.id)
            if api_type == "volcengineArk" and len(request.vectorText) > cls._ARK_MAX_TEXT:
                # Text exceeds the embedding model's limit: index it chunk by
                # chunk, each chunk carrying its own metadata copy.  For a
                # chunked entry "originalText" holds the chunk itself.
                for chunk in split_text_into_chunks(request.vectorText, cls._ARK_MAX_TEXT):
                    docs.append(Document(page_content=chunk,
                                         metadata={
                                             "id": request.id,
                                             "categories": request.categories,
                                             "originalText": chunk,
                                         }))
                continue
            docs.append(Document(page_content=request.vectorText,
                                 metadata={
                                     "id": request.id,
                                     "categories": request.categories,
                                     "originalText": request.originalText,
                                 }))
        if not ids:
            return None
        if vector_type in (VectorType.ELASTICSEARCH, VectorType.OpenSearch):
            search_kwargs = {
                "query": {"terms": {"metadata.id": ids}}
            }
        elif vector_type == VectorType.MILVUS:
            # Fix: quote the ids (they are strings) so the Milvus boolean
            # expression is valid — matching delete_adapter's format; the
            # previous unquoted form produced e.g. ``id in [abc]``.
            ids_expr = ','.join('"{0}"'.format(x) for x in ids)
            search_kwargs = {"expr": f"id in [{ids_expr}]"}
        else:
            raise Exception("Unknown vector type")
        return InsertDocumentReq(kwargs=search_kwargs, docs=docs)

    @classmethod
    def search_kwargs_adapter(cls,
                              question: str,
                              request: CompletionRequest,
                              vector_type: VectorType = VectorType.ELASTICSEARCH):
        """Build the similarity-search kwargs for the target vector database.

        :param question: query text; falls back to ``request.prompt`` when
            empty or ``None``
        :param request: completion request carrying the search limit,
            categories filter and distance threshold
        :param vector_type: target vector database type
        :return: search kwargs dict, or ``None`` for an unknown vector_type
        """
        if not question:
            question = request.prompt
        search_kwargs = None
        if vector_type in (VectorType.ELASTICSEARCH, VectorType.OpenSearch):
            search_kwargs = {
                "k": request.vector_search_limit,
                "filter": {
                    "bool": {
                        "must": [
                            {"terms": {"metadata.categories": request.vector_categories}},
                            {"match": {"metadata.originalText": {
                                "analyzer": "ik_max_word",
                                "query": question
                            }}}
                        ]
                    }
                },
                "score_threshold": request.vector_search_distance,
                "doc_builder": cls.doc_builder
            }
        elif vector_type == VectorType.MILVUS:
            # Render the categories as a quoted, comma-separated list for the
            # Milvus boolean expression.
            categories = ','.join('"{0}"'.format(x) for x in request.vector_categories)
            search_kwargs = {
                "k": request.vector_search_limit,
                "param": {'metric_type': 'IP', 'params': {'nprobe': 50},
                          "search_distance": request.vector_search_distance},
                "expr": f'categories in [{categories}]',
                "timeout": 1
            }
        return search_kwargs

    @classmethod
    def delete_adapter(cls,
                       request: VectorRemoveRequest,
                       vector_type: VectorType = VectorType.ELASTICSEARCH):
        """Build the delete query for the target vector database.

        :param request: ids and/or categories selecting the entries to remove
        :param vector_type: target vector database type
        :return: delete kwargs dict, or ``None`` when no criteria were given
            or the vector_type is unknown
        """
        search_kwargs = None
        if vector_type in (VectorType.ELASTICSEARCH, VectorType.OpenSearch):
            if request.ids:
                search_kwargs = {
                    "query": {"terms": {"metadata.id": request.ids}}
                }
            if request.categories:
                # NOTE(review): when both ids and categories are supplied,
                # the categories query replaces the ids query (they are not
                # combined), unlike the Milvus branch which ANDs both —
                # confirm this precedence is intended.
                search_kwargs = {
                    "query": {"terms": {"metadata.categories": request.categories}}
                }
        elif vector_type == VectorType.MILVUS:
            clauses: List[str] = []
            if request.ids:
                ids = ','.join('"{0}"'.format(x) for x in request.ids)
                clauses.append(f"id in [{ids}]")
            if request.categories:
                categories = ','.join('"{0}"'.format(x) for x in request.categories)
                clauses.append(f"categories in [{categories}]")
            if clauses:
                search_kwargs = {
                    "expr": " and ".join(clauses),
                }
        return search_kwargs