from enum import Enum
from transformers import AutoTokenizer, AutoModel
from typing import List, Union, Optional
import torch
import numpy as np
from tqdm.auto import tqdm, trange
from loguru import logger

# Load model from HuggingFace Hub
# tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh')
# model = AutoModel.from_pretrained('BAAI/bge-large-zh')

class EncoderType(Enum):
    """Pooling strategies for turning BERT hidden states into a sentence embedding."""
    FIRST_LAST_AVG = 0
    LAST_AVG = 1
    CLS = 2
    POOLER = 3
    MEAN = 4

    def __str__(self):
        # Render as the bare member name, e.g. "MEAN".
        return self.name

    @staticmethod
    def from_string(s: str) -> "EncoderType":
        """
        Parse an encoder-type name (e.g. ``"MEAN"``) into an :class:`EncoderType`.

        :param s: Member name, case-sensitive.
        :raises ValueError: if ``s`` is not a valid encoder type name.
        """
        try:
            return EncoderType[s]
        except KeyError:
            # Include the offending value and the valid options so callers
            # get an actionable message (original raised a bare ValueError()).
            raise ValueError(
                f"Unknown encoder_type {s!r}; expected one of "
                f"{[e.name for e in EncoderType]}"
            ) from None

class BGEModel:
    def __init__(
            self,
            model_name_or_path: str = "model/bge-large-zh",
            encoder_type: Union[str, EncoderType] = "MEAN",
            max_seq_length: int = 512,
            device: Optional[str] = None,
    ):
        """
        Initialize the sentence-embedding model.

        :param model_name_or_path: Name (HuggingFace hub) or local path of the model to load.
        :param encoder_type: Pooling strategy; see the EncoderType enum for options:
            FIRST_LAST_AVG, LAST_AVG, CLS, POOLER (cls + dense), MEAN (masked mean of last_hidden_state)
        :param max_seq_length: Maximum tokenized sequence length.
        :param device: Device (like 'cuda' / 'cpu') for computation. If None, uses CUDA when available.

        bert model: https://huggingface.co/transformers/model_doc/bert.html?highlight=bert#transformers.BertModel.forward
        BERT returns: <last_hidden_state>, <pooler_output> [hidden_states, attentions].
        The docs note <last_hidden_state> is a better semantic summary than <pooler_output>,
        hence the pooling strategies here operate on hidden states by default.
        """
        self.model_name_or_path = model_name_or_path
        if isinstance(encoder_type, str):
            encoder_type = EncoderType.from_string(encoder_type)
        if encoder_type not in list(EncoderType):
            raise ValueError(f"encoder_type must be in {list(EncoderType)}")
        self.encoder_type = encoder_type
        self.max_seq_length = max_seq_length
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        self.bert = AutoModel.from_pretrained(model_name_or_path)
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = torch.device(device)
        logger.debug("Use device: {}".format(self.device))
        self.bert.to(self.device)
        self.results = {}  # Save training process evaluation result

    def __str__(self):
        return f"<SentenceModel: {self.model_name_or_path}, encoder_type: {self.encoder_type}, " \
               f"max_seq_length: {self.max_seq_length}, emb_dim: {self.get_sentence_embedding_dimension()}>"

    def get_sentence_embedding_dimension(self) -> Optional[int]:
        """
        Get the dimension of the sentence embeddings.

        Returns
        -------
        int or None
            The dimension of the sentence embeddings, or None if it cannot be
            determined (e.g. the loaded model was built without a pooler head).
        """
        # Guard every attribute level: some architectures have bert.pooler set
        # to None, so a direct self.bert.pooler.dense access would raise.
        pooler = getattr(self.bert, "pooler", None)
        dense = getattr(pooler, "dense", None) if pooler is not None else None
        return getattr(dense, "out_features", None)

    def get_sentence_embeddings(self, input_ids, attention_mask, token_type_ids):
        """
        Run the backbone and pool its output into sentence embeddings
        according to ``self.encoder_type``.

        :param input_ids: [batch, seq_len] token ids.
        :param attention_mask: [batch, seq_len] mask, 1 for real tokens, 0 for padding.
        :param token_type_ids: [batch, seq_len] segment ids.
        :return: [batch, hidden_size] sentence embeddings.
        :raises ValueError: if ``self.encoder_type`` is not a known EncoderType.
        """
        model_output = self.bert(input_ids, attention_mask, token_type_ids, output_hidden_states=True)

        if self.encoder_type == EncoderType.FIRST_LAST_AVG:
            # Average the first and last transformer layers' hidden states.
            # hidden_states[0] is the embedding layer output, so layer 1 is
            # the first transformer layer.
            first = model_output.hidden_states[1]
            last = model_output.hidden_states[-1]
            seq_length = first.size(1)  # Sequence length

            first_avg = torch.avg_pool1d(first.transpose(1, 2), kernel_size=seq_length).squeeze(-1)  # [batch, hid_size]
            last_avg = torch.avg_pool1d(last.transpose(1, 2), kernel_size=seq_length).squeeze(-1)  # [batch, hid_size]
            final_encoding = torch.avg_pool1d(
                torch.cat([first_avg.unsqueeze(1), last_avg.unsqueeze(1)], dim=1).transpose(1, 2),
                kernel_size=2).squeeze(-1)
            return final_encoding

        if self.encoder_type == EncoderType.LAST_AVG:
            # Unmasked average over the last layer (padding positions included).
            sequence_output = model_output.last_hidden_state  # [batch_size, max_len, hidden_size]
            seq_length = sequence_output.size(1)
            return torch.avg_pool1d(sequence_output.transpose(1, 2), kernel_size=seq_length).squeeze(-1)

        if self.encoder_type == EncoderType.CLS:
            # Embedding of the [CLS] token from the last layer.
            return model_output.last_hidden_state[:, 0]  # [batch, hid_size]

        if self.encoder_type == EncoderType.POOLER:
            # [CLS] passed through the pretrained pooler dense + tanh.
            return model_output.pooler_output  # [batch, hid_size]

        if self.encoder_type == EncoderType.MEAN:
            # Mean pooling - take the attention mask into account so that
            # padding tokens do not dilute the average.
            token_embeddings = model_output.last_hidden_state  # Contains all token embeddings
            input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
            # Clamp the denominator to avoid division by zero for all-padding rows.
            final_encoding = torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
                input_mask_expanded.sum(1), min=1e-9)
            return final_encoding  # [batch, hid_size]

        # Previously this fell through and silently returned None; fail loudly instead.
        raise ValueError(f"Unknown encoder_type: {self.encoder_type}")

    def encode(
            self,
            sentences: Union[str, List[str]],
            batch_size: int = 64,
            show_progress_bar: bool = False,
            convert_to_numpy: bool = True,
            convert_to_tensor: bool = False,
            device: str = None,
    ):
        """
        Returns the embeddings for a batch of sentences.

        :param sentences: str/list, Input sentences
        :param batch_size: int, Batch size
        :param show_progress_bar: bool, Whether to show a progress bar for the sentences
        :param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
        :param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
        :param device: Which device to use for the computation. Defaults to the
            device the model was loaded on (``self.device``).
        """
        if device is None:
            # Use the device the model actually lives on. Re-deriving it from
            # torch.cuda.is_available() (as before) could place inputs on a
            # different device than the model when an explicit device was
            # passed to __init__.
            device = self.device
        if convert_to_tensor:
            convert_to_numpy = False
        input_is_string = False
        if isinstance(sentences, str) or not hasattr(sentences, "__len__"):
            sentences = [sentences]
            input_is_string = True

        all_embeddings = []
        # Sort by descending length so each batch has similar lengths,
        # minimizing padding; order is restored after encoding.
        length_sorted_idx = np.argsort([-len(s) for s in sentences])
        sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
        for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
            sentences_batch = sentences_sorted[start_index: start_index + batch_size]
            # Compute sentences embeddings
            with torch.no_grad():
                embeddings = self.get_sentence_embeddings(
                    **self.tokenizer(sentences_batch, max_length=self.max_seq_length,
                                     padding=True, truncation=True, return_tensors='pt').to(device)
                )
            embeddings = embeddings.detach()
            if convert_to_numpy:
                embeddings = embeddings.cpu()
            all_embeddings.extend(embeddings)
        # Undo the length sort so outputs align with the input order.
        all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
        if convert_to_tensor:
            all_embeddings = torch.stack(all_embeddings)
        elif convert_to_numpy:
            all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])

        if input_is_string:
            # Single-string input: unwrap the batch dimension.
            all_embeddings = all_embeddings[0]

        return all_embeddings
# # Tokenize sentences
# encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# # for s2p(short query to long passage) retrieval task, add an instruction to query (not add instruction for passages)
# # encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt')

# # Compute token embeddings
# with torch.no_grad():
#     model_output = model(**encoded_input)
#     # Perform pooling. In this case, cls pooling.
#     sentence_embeddings = model_output[0][:, 0]
# # normalize embeddings
# sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)
# print("Sentence embeddings:", sentence_embeddings)

if __name__ == "__main__":
    # Demo: embed a single citizen-complaint query (Chinese) with the BGE model.
    queries = "建设交通类,绿化市容,城市管理,无证设摊,市民来电反映:上述地址苏河驿站，每天21:89-凌晨5点左右，无证设摊摊位，1个摊位摆放三四张桌子，没人处理，诉求:请管理部门尽快整治(需回复)"
    model = BGEModel("model/bge-large-zh")
    corpus_embeddings = model.encode(queries)
    print("zhipai_class")