# -*- coding: utf-8 -*-
from uuid import uuid4
from typing import List, Tuple, Optional, Union, Dict
from loguru import logger
from langchain_core.documents import Document

from utils.documents_utils import deduplicate_documents_generator
from service.es_service import ESConnection


# Core ES index construction and retrieval for this service.
# Entry point for writing documents into ES.
def add_documents_to_es(store, documents: List["Document"]) -> List[str]:
    """Index the given documents into Elasticsearch via the vector store.

    Args:
        store: a langchain ES vector store exposing ``add_documents``.
        documents: chunks to write into the index.

    Returns:
        The generated UUID string ids, one per document, as reported back
        by ``store.add_documents``.
    """
    # One fresh UUID per document; iterate the documents directly instead
    # of range(len(...)).
    uuids = [str(uuid4()) for _ in documents]
    return store.add_documents(documents=documents, ids=uuids)


def similarity_search_with_score(store, query: str, k: int = 10, query_type="kb", file_ids=None) \
        -> List[Document]:
    """Vector similarity search against ES, restricted to the given file ids.

    Args:
        store: a langchain ES vector store exposing ``similarity_search_with_score``.
        query: the natural-language query text.
        k: maximum number of hits to retrieve.
        query_type: "kb" filters with a ``terms`` clause, "clause" with ``term``.
        file_ids: file ids to restrict the search to; empty/None returns [].

    Returns:
        Deduplicated matching documents (scores are dropped).
    """
    # No file filter (or unknown query_type) -> no search. Note: the original
    # aliased `references = reference_list = []`; kept as two independent paths.
    if not file_ids:
        return []
    if query_type == "kb":
        es_filter = [{"terms": {"metadata.file_id.keyword": file_ids}}]
    elif query_type == "clause":
        # NOTE(review): ``term`` expects a single value, but ``file_ids`` is
        # list-shaped elsewhere in this file — confirm callers pass a scalar
        # for the "clause" path.
        es_filter = [{"term": {"metadata.file_id.keyword": file_ids}}]
    else:
        return []

    reference_list: List[Tuple[Document, float]] = store.similarity_search_with_score(
        query=query, filter=es_filter, k=k)
    if not reference_list:
        return []
    reference_doc_list = [doc for doc, _score in reference_list]
    return list(deduplicate_documents_generator(reference_doc_list, revert_table=True))


def ik_match_search(es, target: str, index_name: str, file_ids: Optional[List] = None, top_k: int = 10) \
        -> List[Document]:
    """Full-text match search using the ik_smart Chinese analyzer.

    Args:
        es: ES connection exposing ``search_documents``.
        target: query text, analyzed with ``ik_smart``.
        index_name: index to search.
        file_ids: optional file ids; when given, hits are restricted to them
            via a ``terms`` filter.
        top_k: maximum number of hits.

    Returns:
        Deduplicated matching documents.
    """
    # The match clause is identical in both branches — build it once instead
    # of duplicating the literal.
    match_clause = {
        "match": {
            "text": {
                "query": target,
                "analyzer": "ik_smart",
            }
        }
    }
    if file_ids:
        query = {
            "bool": {
                "must": [
                    match_clause,
                    {"terms": {"metadata.file_id.keyword": file_ids}},
                ]
            }
        }
    else:
        query = match_clause
    query_body = {"query": query, "size": top_k}

    reference_list: List[Document] = es.search_documents(query_body=query_body, index_name=index_name)
    return list(deduplicate_documents_generator(reference_list, revert_table=True))


def es_search_by_chunk_id(es, chunk_id: str):
    """Fetch a single chunk document by its id.

    Returns the ES document when found, otherwise None (404s are ignored
    rather than raised).
    """
    result = es.get(doc_id=chunk_id, error_trace=False, ignore=[404])
    return result or None


def es_search_by_file_id(es, file_id: str):
    """Look up all chunk documents belonging to a file.

    TODO(review): not implemented yet — currently a no-op stub.
    """
    pass


def es_update_file_by_id(es, file_id: str):
    """Update all chunk documents belonging to a file.

    TODO(review): not implemented yet — currently a no-op stub.
    """
    pass


def es_delete_file_by_id(es, file_id: str):
    """Delete all chunk documents belonging to a file.

    TODO(review): not implemented yet — currently a no-op stub.
    """
    pass


def es_update_chunk_by_id(es_list: Union[List, ESConnection], chunk_id: str, content: str):
    """Update a chunk's content across one or more ES connections.

    NOTE(review): this function is an unfinished stub — ``update_chunk`` is
    defined but never invoked, ``content`` is never used, and the fetched
    ``doc`` is discarded, so calling this currently has no effect. The shape
    of the update body is not visible here, so it is left flagged rather
    than guessed at.
    """
    def update_chunk(es, new_doc, idxnm: str):
        # True when the underlying ES update call reports success.
        flag = es.update(doc_id=chunk_id, body=new_doc, idxnm=idxnm)
        if flag:
            return True
        else:
            return False

    if isinstance(es_list, List):
        for es in es_list:
            # TODO(review): build the updated body from ``content`` and call
            # ``update_chunk`` — the fetched doc is currently discarded.
            doc = es_search_by_chunk_id(es, chunk_id)
    else:
        # TODO(review): single-connection path not implemented.
        pass


def es_delete_chunk_by_id(es_list: Union[List, ESConnection], chunk_id: str) -> bool:
    """Delete a chunk by id from the first ES connection that holds it.

    Args:
        es_list: a single ESConnection or a list of them to probe in order.
        chunk_id: id of the chunk document to delete.

    Returns:
        True if the chunk was found and the delete call reported success,
        False otherwise.

    Fixes relative to the original: ``delete_chunk`` was called without its
    required ``idxnm`` argument (TypeError whenever a doc was found), the
    single-connection branch reported existence without ever deleting, and
    the not-found path implicitly returned None despite the ``-> bool``
    annotation.
    """
    def delete_chunk(es: ESConnection, idxnm: str) -> bool:
        # True when the underlying ES delete call reports success.
        return bool(es.delete(doc_id=chunk_id, idxnm=idxnm))

    # Normalize to a list so both call shapes share one code path.
    connections = es_list if isinstance(es_list, list) else [es_list]
    for es in connections:
        doc = es_search_by_chunk_id(es, chunk_id)
        if not doc:
            continue
        # ES get responses carry the owning index under "_index".
        # TODO(review): confirm ESConnection.get preserves that field.
        return delete_chunk(es, doc.get("_index"))
    return False
