from typing import Dict, List, Optional, Any


from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStoreRetriever
from loguru import logger
from typing_extensions import override

from ai_engine.common.ai_common import split_list
from ai_engine.core.vectorstore.client.opensearch import OpenSearchVectorSearchV2
from ai_engine.core.vectorstore.client.vector_store_client import VectorStoreClient


class OpenSearchClient(VectorStoreClient):
    """Vector store client backed by an OpenSearch index.

    Builds an ``OpenSearchVectorSearchV2`` from the connection config held
    by the base class and exposes retrieval / add / delete / raw-query
    helpers scoped to a single index (``collection_name``).
    """

    def __init__(
            self,
            embeddings: Embeddings,
            collection_name: str,
            connect_config: dict):
        """
        Args:
            embeddings: Embedding model used to vectorize documents and queries.
            collection_name: OpenSearch index name this client operates on.
            connect_config: Connection settings; must contain the keys
                ``host``, ``port``, ``user`` and ``password``.
        """
        super().__init__(embeddings, collection_name, connect_config)

        # OpenSearch handle. "faiss" selects the ANN engine used by the
        # k-NN plugin; pool_maxsize bounds the HTTP connection pool.
        self._vector_store = OpenSearchVectorSearchV2(
            index_name=self._collection_name,
            embedding_function=self._embeddings,
            opensearch_url=str(self._connect["host"]) + ":" + str(self._connect["port"]),
            http_auth=(self._connect["user"], self._connect["password"]),
            engine="faiss",
            pool_maxsize=100
        )

    @override
    def get_retriever(self, **kwargs: Any) -> VectorStoreRetriever:
        """Return a similarity-score-threshold retriever over this index.

        Args:
            **kwargs: Must contain ``search_kwargs`` (dict). Defaults are
                filled in for missing (or explicitly ``None``) entries:
                ``k`` — number of nearest neighbours, defaults to 10;
                ``score_threshold`` — minimum similarity score a hit must
                reach, defaults to 0.6.

        Raises:
            ValueError: If ``search_kwargs`` is not provided.
        """
        if "search_kwargs" not in kwargs:
            raise ValueError("未配置参数search_kwargs")
        # Fill in defaults; an explicit None is treated the same as absent,
        # matching the previous behavior. The caller's dict is updated in
        # place, as before.
        search_kwargs = kwargs["search_kwargs"]
        if search_kwargs.get("k") is None:
            search_kwargs["k"] = 10
        if search_kwargs.get("score_threshold") is None:
            search_kwargs["score_threshold"] = 0.6
        logger.debug("向量查询参数=" + str(kwargs))
        return self._vector_store.as_retriever(search_type="similarity_score_threshold", **kwargs)

    @override
    def add_documents(self, docs: List[Document], **kwargs: Any) -> None:
        """Write documents to the index in batches of 100.

        Args:
            docs: Documents to index; ``None`` or empty is a no-op.
            **kwargs: Pass ``refresh=True`` to force an index refresh after
                each batch so the documents become searchable immediately.
        """
        if not docs:
            return
        # Hoist the refresh decision out of the batch loop.
        refresh = bool(kwargs.get("refresh"))
        chunked_list = split_list(docs, 100) if len(docs) > 100 else [docs]
        for chunk in chunked_list:
            self._vector_store.add_documents(documents=chunk)
            if refresh:
                self._vector_store.client.indices.refresh(index=self._collection_name)

        logger.info("新增成功,index_name=" + self._collection_name)

    @override
    def create_collection(self, mappings: Dict[str, str]) -> None:
        """Create the index with the given field mappings.

        Args:
            mappings: OpenSearch index mapping definition.
        """
        self._vector_store.client.indices.create(
            index=self._collection_name,
            mappings=mappings
        )
        # Use the module logger, consistent with the other methods
        # (was a bare print).
        logger.info("创建索引成功,index_name=" + self._collection_name)

    @override
    def delete_documents(self,
                         body: Any) -> Optional[bool]:
        """Delete documents matching an OpenSearch query DSL body.

        Args:
            body: Query DSL dict selecting the documents to delete;
                ``None`` is a no-op.

        Returns:
            True on success or when nothing matched; False on failure.
        """
        try:
            if body is None:
                return True
            # Probe first so delete_by_query is only issued when there is
            # actually something to delete.
            response = self._vector_store.client.search(
                body=body,
                index=self._collection_name
            )
            if not response["hits"]["hits"]:
                logger.info("没有要删除的数据")
                return True
            self._vector_store.client.delete_by_query(
                index=self._collection_name,
                body=body,
                refresh=True,
                allow_no_indices=True
            )
        except Exception as err:
            logger.error(f"Could not delete documents: {err}")
            return False
        return True

    def del_index(self) -> None:
        """Delete the whole index."""
        self._vector_store.client.indices.delete(index=self._collection_name)
        # Use the module logger, consistent with the other methods
        # (was a bare print).
        logger.info("删除索引成功,collection_name=" + self._collection_name)

    @override
    def query_documents(self, body: Any) -> List[Document]:
        """Run a raw OpenSearch query and map the hits to Documents.

        Each hit's ``_source.metadata`` is expected to contain
        ``originalText`` (used as page_content); the hit's ``text`` field
        is kept reachable via metadata under the key ``text``.

        Args:
            body: OpenSearch query DSL dict.

        Raises:
            Exception: Re-raises any client/search error after logging it.
        """
        documents: List[Document] = []
        try:
            response = self._vector_store.client.search(
                body=body,
                index=self._collection_name
            )
            for hit in response["hits"]["hits"]:
                source = hit["_source"]
                metadata = source["metadata"]
                # pop() both removes the key and returns the content in
                # one step (was a lookup followed by a separate pop).
                original_text = metadata.pop("originalText")
                metadata["text"] = source["text"]
                documents.append(Document(
                    page_content=original_text,
                    metadata=metadata,
                ))
        except Exception as err:
            logger.error(f"query error: {err}")
            raise
        return documents




