import warnings
from typing import Callable, Any, List, Optional, Dict

from langchain_community.vectorstores.opensearch_vector_search import (
    OpenSearchVectorSearch,
    MATCH_ALL_QUERY,
    PAINLESS_SCRIPTING_SEARCH,
    SCRIPT_SCORING_SEARCH
)
from langchain_core.embeddings import Embeddings

# Default number of nearest-neighbour candidates each shard recalls in knn queries
default_k = 100


def _default_approximate_search_query(
        query_vector: List[float],
        k: int = 4,
        vector_field: str = "vector_field",
        score_threshold: Optional[float] = 0.0,
) -> Dict:
    """Build the default Approximate k-NN search query body.

    Overrides the upstream helper, where the request ``size`` was tied to
    the knn ``k`` — tuning ``k`` changed both the number of documents
    returned and the per-shard recall. Here ``k`` only controls ``size``
    (documents returned), while the knn clause always asks each shard for
    ``default_k`` nearest neighbours; a larger recall typically improves
    accuracy/recall at some extra computational cost.

    Args:
        query_vector: Embedding vector to search with.
        k: Number of documents to return (the request ``size``).
        vector_field: Name of the index field holding the vectors.
        score_threshold: Minimum ``_score`` a hit must reach to be returned.

    Returns:
        The OpenSearch query body as a dict.
    """
    knn_clause = {vector_field: {"vector": query_vector, "k": default_k}}
    return {
        "size": k,
        "min_score": score_threshold,
        "query": {"knn": knn_clause},
    }


def _approximate_search_query_with_boolean_filter(
        query_vector: List[float],
        boolean_filter: Dict,
        k: int = 4,
        vector_field: str = "vector_field",
        subquery_clause: str = "must",
        score_threshold: Optional[float] = 0.0,
) -> Dict:
    """Build an Approximate k-NN query combined with a Boolean filter.

    The knn clause is nested inside a ``bool`` query so ``boolean_filter``
    restricts the candidate documents. As elsewhere in this module, ``k``
    only sets the request ``size`` while each shard recalls ``default_k``
    neighbours.

    Args:
        query_vector: Embedding vector to search with.
        boolean_filter: Boolean filter clause applied to the candidates.
        k: Number of documents to return (the request ``size``).
        vector_field: Name of the index field holding the vectors.
        subquery_clause: Bool occurrence type that wraps the knn clause
            (e.g. ``"must"`` or ``"should"``).
        score_threshold: Minimum ``_score`` a hit must reach to be returned.

    Returns:
        The OpenSearch query body as a dict.
    """
    knn_query = {"knn": {vector_field: {"vector": query_vector, "k": default_k}}}
    bool_query = {
        "filter": boolean_filter,
        subquery_clause: [knn_query],
    }
    return {
        "size": k,
        "min_score": score_threshold,
        "query": {"bool": bool_query},
    }


def _approximate_search_query_with_efficient_filter(
        query_vector: List[float],
        efficient_filter: Dict,
        k: int = 4,
        vector_field: str = "vector_field",
        score_threshold: Optional[float] = 0.0,
) -> Dict:
    """Build an Approximate k-NN query with an Efficient Filter.

    Efficient Filters are supported by the Lucene and Faiss engines; the
    filter is attached directly inside the knn clause of the default
    approximate-search query.

    Args:
        query_vector: Embedding vector to search with.
        efficient_filter: Filter clause placed inside the knn body.
        k: Number of documents to return (the request ``size``).
        vector_field: Name of the index field holding the vectors.
        score_threshold: Minimum ``_score`` a hit must reach to be returned.

    Returns:
        The OpenSearch query body as a dict.
    """
    base_query = _default_approximate_search_query(
        query_vector, k=k, vector_field=vector_field, score_threshold=score_threshold
    )
    knn_body = base_query["query"]["knn"][vector_field]
    knn_body["filter"] = efficient_filter
    return base_query


def _default_script_query(
        query_vector: List[float],
        k: int = 4,
        space_type: str = "l2",
        pre_filter: Optional[Dict] = None,
        vector_field: str = "vector_field",
        score_threshold: Optional[float] = 0.0,
) -> Dict:
    """For Script Scoring Search, this is the default query."""

    if not pre_filter:
        pre_filter = MATCH_ALL_QUERY

    return {
        "size": k,
        "min_score": score_threshold,
        "query": {
            "script_score": {
                "query": pre_filter,
                "script": {
                    "source": "knn_score",
                    "lang": "knn",
                    "params": {
                        "field": vector_field,
                        "query_value": query_vector,
                        "space_type": space_type,
                    },
                },
            }
        },
    }


def _default_painless_scripting_query(
        query_vector: List[float],
        k: int = 4,
        space_type: str = "l2Squared",
        pre_filter: Optional[Dict] = None,
        vector_field: str = "vector_field",
        score_threshold: Optional[float] = 0.0,
) -> Dict:
    """For Painless Scripting Search, this is the default query."""

    if not pre_filter:
        pre_filter = MATCH_ALL_QUERY

    source = __get_painless_scripting_source(space_type, vector_field=vector_field)
    return {
        "size": k,
        "min_score": score_threshold,
        "query": {
            "script_score": {
                "query": pre_filter,
                "script": {
                    "source": source,
                    "params": {
                        "field": vector_field,
                        "query_value": query_vector,
                    },
                },
            }
        },
    }


def __get_painless_scripting_source(
        space_type: str, vector_field: str = "vector_field"
) -> str:
    """For Painless Scripting, it returns the script source based on space type."""
    source_value = (
            "(1.0 + " + space_type + "(params.query_value, doc['" + vector_field + "']))"
    )
    if space_type == "cosineSimilarity":
        return source_value
    else:
        return "1/" + source_value


class OpenSearchVectorSearchV2(OpenSearchVectorSearch):
    """OpenSearchVectorSearch override with custom recall and hit handling.

    Differences from the parent class:
      * relevance scores are passed through unchanged instead of being
        re-normalised;
      * raw similarity search relies on this module's query builders, which
        decouple the number of returned documents (``size``/``k``) from the
        per-shard knn recall (``default_k``);
      * hits are post-processed so the Document ``page_content`` carries the
        full stored passage (``metadata["originalText"]``) instead of the
        shorter text the embedding was computed from.
    """

    def __init__(
            self,
            opensearch_url: str,
            index_name: str,
            embedding_function: Embeddings,
            **kwargs: Any,
    ):
        """Initialize with the OpenSearch endpoint, index name and embedding model."""
        super().__init__(opensearch_url=opensearch_url,
                         index_name=index_name,
                         embedding_function=embedding_function,
                         **kwargs)

    @staticmethod
    def _identity_fn(score: float) -> float:
        """Return the score unchanged (no normalisation)."""
        return score

    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """Select the relevance score function for this store.

        The 'correct' relevance function may differ depending on a few
        things, including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many
          others are not!)
        - embedding dimensionality
        - etc.

        Engine scores are used as-is here, as they are already normalized
        similarities:
        https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-params
        """
        return self._identity_fn

    def _raw_similarity_search_with_score_by_vector(
            self,
            embedding: List[float],
            k: int = 4,
            score_threshold: Optional[float] = 0.0,
            **kwargs: Any,
    ) -> List[dict]:
        """Return raw opensearch documents (dict) including vectors,
        scores most similar to the embedding vector.

        By default, supports Approximate Search.
        Also supports Script Scoring and Painless Scripting.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            score_threshold: Specify a score threshold to return only documents
                above the threshold. Defaults to 0.0.

        Returns:
            List of dict with its scores most similar to the embedding.

        Raises:
            ValueError: If an invalid `search_type` is given, if mutually
                exclusive filters are combined, or if an unsupported search
                type is used against Amazon OpenSearch Serverless.

        Optional Args:
            same as `similarity_search`
        """
        search_type = kwargs.get("search_type", "approximate_search")
        vector_field = kwargs.get("vector_field", "vector_field")
        index_name = kwargs.get("index_name", self.index_name)
        # The kwarg stays named "filter" for API compatibility; bind it to a
        # local name that does not shadow the builtin.
        metadata_filter = kwargs.get("filter", {})

        if (
                self.is_aoss
                and search_type != "approximate_search"
                and search_type != SCRIPT_SCORING_SEARCH
        ):
            raise ValueError(
                "Amazon OpenSearch Service Serverless only "
                "supports `approximate_search` and `script_scoring`"
            )

        if search_type == "approximate_search":
            boolean_filter = kwargs.get("boolean_filter", {})
            subquery_clause = kwargs.get("subquery_clause", "must")
            efficient_filter = kwargs.get("efficient_filter", {})
            # `lucene_filter` is deprecated, added for Backwards Compatibility
            lucene_filter = kwargs.get("lucene_filter", {})

            if boolean_filter != {} and efficient_filter != {}:
                raise ValueError(
                    "Both `boolean_filter` and `efficient_filter` are provided which "
                    "is invalid"
                )

            if lucene_filter != {} and efficient_filter != {}:
                raise ValueError(
                    "Both `lucene_filter` and `efficient_filter` are provided which "
                    "is invalid. `lucene_filter` is deprecated"
                )

            if lucene_filter != {} and boolean_filter != {}:
                raise ValueError(
                    "Both `lucene_filter` and `boolean_filter` are provided which "
                    "is invalid. `lucene_filter` is deprecated"
                )

            # A plain `filter` is routed to whichever filtering mechanism
            # the configured engine supports.
            if (
                    efficient_filter == {}
                    and boolean_filter == {}
                    and lucene_filter == {}
                    and metadata_filter != {}
            ):
                if self.engine in ["faiss", "lucene"]:
                    efficient_filter = metadata_filter
                else:
                    boolean_filter = metadata_filter

            if boolean_filter != {}:
                search_query = _approximate_search_query_with_boolean_filter(
                    embedding,
                    boolean_filter,
                    k=k,
                    vector_field=vector_field,
                    subquery_clause=subquery_clause,
                    score_threshold=score_threshold,
                )
            elif efficient_filter != {}:
                search_query = _approximate_search_query_with_efficient_filter(
                    embedding,
                    efficient_filter,
                    k=k,
                    vector_field=vector_field,
                    score_threshold=score_threshold,
                )
            elif lucene_filter != {}:
                warnings.warn(
                    "`lucene_filter` is deprecated. Please use the keyword argument"
                    " `efficient_filter`"
                )
                search_query = _approximate_search_query_with_efficient_filter(
                    embedding,
                    lucene_filter,
                    k=k,
                    vector_field=vector_field,
                    score_threshold=score_threshold,
                )
            else:
                search_query = _default_approximate_search_query(
                    embedding,
                    k=k,
                    vector_field=vector_field,
                    score_threshold=score_threshold,
                )
        elif search_type == SCRIPT_SCORING_SEARCH:
            space_type = kwargs.get("space_type", "l2")
            pre_filter = kwargs.get("pre_filter", MATCH_ALL_QUERY)
            search_query = _default_script_query(
                embedding,
                k,
                space_type,
                pre_filter,
                vector_field,
                score_threshold=score_threshold,
            )
        elif search_type == PAINLESS_SCRIPTING_SEARCH:
            space_type = kwargs.get("space_type", "l2Squared")
            pre_filter = kwargs.get("pre_filter", MATCH_ALL_QUERY)
            search_query = _default_painless_scripting_query(
                embedding,
                k,
                space_type,
                pre_filter,
                vector_field,
                score_threshold=score_threshold,
            )
        else:
            raise ValueError("Invalid `search_type` provided as an argument")

        response = self.client.search(index=index_name, body=search_query)

        # Rearrange each hit into the Document layout expected downstream.
        # Stored structure in the index:
        #   _source["metadata"]["originalText"] -- the full passage text
        #   _source["text"]                     -- the (shorter) text the
        #                                          vector was computed from
        # LangChain uses _source["text"] as Document.page_content (what is
        # ultimately handed to the LLM), so swap the two: page_content
        # becomes the full original text, while the embedding text is kept
        # under metadata["text"].
        new_hits = []
        for hit in response["hits"]["hits"]:
            source = hit["_source"]
            original_text = source["metadata"].pop("originalText")
            source["metadata"]["text"] = source["text"]
            source["text"] = original_text
            new_hits.append(hit)
        return new_hits
