import logging
from typing import Dict, List, Optional

from scipy.sparse import csr_array
import numpy as np

from pymilvus.model.base import BaseEmbeddingFunction
from pymilvus.model.utils import import_FlagEmbedding, import_datasets
from pymilvus.model.sparse.utils import stack_sparse_embeddings


logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Only classes under pymilvus.model.dense can be imported and used directly.
# from pymilvus.model.hybrid import BGEM3EmbeddingFunction
# This class is adapted from BGEM3EmbeddingFunction, with OpenAIEmbeddingFunction
# used as an additional reference.
class SksBGEM3SparseEmbeddingFunction(BaseEmbeddingFunction):
    """Milvus embedding function backed by FlagEmbedding's BGE-M3 model.

    Adapted from ``BGEM3EmbeddingFunction`` with ``OpenAIEmbeddingFunction``
    as an additional reference.

    NOTE(review): despite "Sparse" in the class name, every encode method
    returns the model's *dense* vectors (``output["dense_vecs"]``). Confirm
    this is intentional before relying on this class for sparse embeddings.
    """

    def __init__(
            self,
            model_name: str = "BAAI/bge-m3",
            batch_size: int = 16,
            device: Optional[str] = None,
            normalize_embeddings: bool = True,
            use_fp16: bool = False,
            dimensions: Optional[int] = None,  # accepted for API parity; currently unused
            **kwargs,
    ):
        """Load the BGE-M3 model.

        Args:
            model_name: HuggingFace model id or local path.
            batch_size: Batch size used by ``model.encode``.
            device: Device string (e.g. "cpu", "cuda:0"). May also be supplied
                via the legacy ``devices`` kwarg, which takes precedence.
            normalize_embeddings: Whether the model normalizes its outputs.
            use_fp16: Run the model in fp16. Not recommended on CPU.
            dimensions: Unused; kept for signature compatibility.
            **kwargs: Extra options forwarded to ``BGEM3FlagModel``.
        """
        import_datasets()
        import_FlagEmbedding()

        try:
            from FlagEmbedding import BGEM3FlagModel
        except AttributeError as e:
            import sys
            # Known Google Colab incompatibility; see milvus-model issue #32.
            if "google.colab" in sys.modules and "ListView" in str(e):
                print(
                    "\033[91mIt looks like you're running on Google Colab. Please restart the session to resolve this issue.\033[0m")
                print(
                    "\033[91mFor further details, visit: https://github.com/milvus-io/milvus-model/issues/32.\033[0m")
            raise

        # Resolve the legacy "devices" kwarg override *before* recording state,
        # so self.device always matches the device actually handed to the model.
        # (Previously self.device was assigned first and left stale when
        # "devices" was passed via kwargs.)
        device = kwargs.pop("devices", device)

        self.model_name = model_name
        self.batch_size = batch_size
        self.normalize_embeddings = normalize_embeddings
        self.device = device
        self.use_fp16 = use_fp16

        # Warn on the resolved device, so the check also fires when "cpu" came
        # in through the "devices" kwarg.
        if device == "cpu" and use_fp16:
            logger.warning(
                "Using fp16 with CPU can lead to runtime errors such as 'LayerNormKernelImpl', It's recommended to set 'use_fp16 = False' when using cpu. "
            )

        self._model_config = dict(
            {
                "model_name_or_path": model_name,
                "devices": device,
                "normalize_embeddings": normalize_embeddings,
                "use_fp16": use_fp16,
            },
            **kwargs,
        )
        self._encode_config = {
            "batch_size": batch_size,
            "return_dense": True,
            "return_sparse": True,
            "return_colbert_vecs": True,
        }

        self.model = BGEM3FlagModel(**self._model_config)

    def encode_queries(self, queries: List[str]) -> List[np.ndarray]:
        """Encode search queries; currently identical to document encoding."""
        return self._encode(queries)

    def encode_documents(self, documents: List[str]) -> List[np.ndarray]:
        """Encode documents for indexing."""
        return self._encode(documents)

    @property
    def dim(self) -> int:
        """Dimensionality of the dense vectors (the model's hidden size)."""
        return self.model.model.model.config.hidden_size

    def __call__(self, texts: List[str]) -> List[np.ndarray]:
        """Alias for encoding a batch of texts."""
        return self._encode(texts)

    def _encode_query(self, query: str) -> np.ndarray:
        # Single-string convenience wrapper; returns the first (only) vector.
        return self._encode(query)[0]

    def _encode_document(self, document: str) -> np.ndarray:
        # Single-string convenience wrapper; returns the first (only) vector.
        return self._encode(document)[0]

    def _encode(self, texts: List[str]) -> List[np.ndarray]:
        """Run the model and return one dense vector per input text."""
        output = self.model.encode(sentences=texts, **self._encode_config)
        return list(output["dense_vecs"])
