import re
from argparse import Namespace
from typing import Any, List

import numpy as np
import torch
from langchain.embeddings.base import Embeddings
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from transformers import AutoModel, AutoTokenizer

from configs.model_config import *


class MyEmbeddings(HuggingFaceEmbeddings):
    """Default sentence-transformers embeddings with L2-normalized outputs.

    Thin wrapper over langchain's ``HuggingFaceEmbeddings`` that flattens
    newlines before encoding and exposes the ``normalize_score``/``rerank``
    hooks shared by the other embedding classes in this module.
    """

    # NOTE: the previous no-op ``__init__`` that only called
    # ``super().__init__(**kwargs)`` was removed — inheriting the parent
    # constructor is equivalent and avoids the useless override.

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Newlines can degrade sentence-transformers quality; flatten them.
        texts = [text.replace("\n", " ") for text in texts]
        # normalize_embeddings=True yields unit-length vectors.
        embeddings = self.client.encode(texts, normalize_embeddings=True)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.client.encode(text, normalize_embeddings=True)
        return embedding.tolist()

    @staticmethod
    def normalize_score(score):
        """Map a raw score into a 0-100-style range.

        For unit vectors the squared L2 distance lies in [0, 4], so
        ``25 * (4 - score)`` maps it onto [0, 100] — presumably ``score``
        is such a distance; TODO(review): confirm against the caller.
        """
        return 25 * (4 - score)

    def rerank(self, query, docs, scores):
        """No-op rerank hook: returns the inputs unchanged."""
        return query, docs, scores


class LuotuoEmbeddings(Embeddings):
    """Embeddings backed by the ``silk-road/luotuo-bert`` model (CLS pooling)."""

    def __init__(self, **kwargs: Any):
        # NOTE(review): **kwargs (e.g. model_name / model_kwargs passed by
        # get_embeddings) are silently ignored, and super().__init__() is
        # never called — confirm this is intentional.
        self.tokenizer = AutoTokenizer.from_pretrained(
            "silk-road/luotuo-bert", trust_remote_code=True
        )
        # Arguments consumed by the model's remote code (trust_remote_code).
        model_args = Namespace(
            do_mlm=None,
            pooler_type="cls",
            temp=0.05,
            mlp_only_train=False,
            init_embeddings_model=None,
            device="cuda",
        )
        # NOTE(review): model_args requests "cuda" but the model is never
        # explicitly moved with .to(...) here, and the tokenized inputs below
        # stay on CPU — verify device placement against the remote model code.
        self.model = AutoModel.from_pretrained(
            "silk-road/luotuo-bert", trust_remote_code=True, model_args=model_args
        )

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Collapse all runs of whitespace (incl. newlines) to single spaces.
        texts = list(map(lambda x: re.sub(r"\s+", " ", x), texts))
        inputs = self.tokenizer(
            texts, padding=True, truncation=True, return_tensors="pt"
        )
        # Get the embeddings
        with torch.no_grad():
            # sent_emb=True is a remote-code flag selecting sentence-level
            # (pooler) output rather than token-level states.
            embeddings = self.model(
                **inputs, output_hidden_states=True, return_dict=True, sent_emb=True
            ).pooler_output
        # normalize Embeddings
        embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
        return embeddings.cpu().tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = re.sub(r"\s+", " ", text)
        # Delegate to embed_documents so query and docs share one code path.
        return self.embed_documents([text])[0]

    @staticmethod
    def normalize_score(score):
        """Map a raw score to a display range.

        25 * (4 - score) maps a squared L2 distance in [0, 4] onto [0, 100];
        the second step then stretches around 100 — presumably an empirical
        calibration for this model. TODO(review): confirm intended range.
        """
        score = 25 * (4 - score)
        score = 100 - (100 - score) * 2
        return score

    def rerank(self, query, docs, scores):
        # No-op rerank hook: returns the inputs unchanged.
        return query, docs, scores


class RocketQAEmbeddings(Embeddings):
    """Embeddings backed by RocketQA dual/cross encoders (zh_dureader v2).

    Uses the dual encoder for embedding documents and queries, and the
    cross encoder to rerank retrieved documents against a query.
    """

    def __init__(self, **kwargs: Any):
        # Imported lazily so the module loads without rocketqa installed
        # unless this backend is actually selected.
        # NOTE(review): **kwargs (model_name / model_kwargs from
        # get_embeddings) are ignored; CUDA is hard-disabled here — confirm.
        import rocketqa

        de_conf = {
            "model": "zh_dureader_de_v2",
            "use_cuda": False,
            "device_id": 0,
            "batch_size": 32,
        }
        self.dual_encoder = rocketqa.load_model(**de_conf)
        ce_conf = {
            "model": "zh_dureader_ce_v2",
            "use_cuda": False,
            "device_id": 0,
            "batch_size": 32,
        }
        self.cross_encoder = rocketqa.load_model(**ce_conf)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using the RocketQA dual encoder.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # encode_para returns a generator; materialize it before converting.
        para_embs = self.dual_encoder.encode_para(para=texts)
        return np.array(list(para_embs)).tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using the RocketQA dual encoder.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        # Fixed local typo: was ``embddings``.
        embeddings = self.dual_encoder.encode_query(query=[text])
        return np.array(list(embeddings)).tolist()[0]

    @staticmethod
    def normalize_score(score):
        """Identity: RocketQA scores are used as-is (scaling happens in rerank)."""
        return score

    def rerank(self, query, docs, scores):
        """Rerank *docs* against *query* with the cross encoder.

        Args:
            query: The query string.
            docs: Retrieved documents (objects exposing ``page_content``).
            scores: Incoming scores; unused — replaced by cross-encoder scores.

        Returns:
            (docs, scores) sorted by descending cross-encoder relevance,
            with scores scaled by 1500 and capped at 100.
            NOTE(review): this returns a 2-tuple while the other classes'
            ``rerank`` returns ``(query, docs, scores)`` — confirm callers
            handle both shapes.
        """
        queries = [query] * len(docs)
        contents = [item.page_content for item in docs]
        # matching() yields a generator of relevance scores, one per pair.
        ranking_score = np.array(list(self.cross_encoder.matching(query=queries, para=contents)))

        # Sort documents by descending relevance.
        sorted_indexes = np.argsort(ranking_score)[::-1]
        docs = [docs[i] for i in sorted_indexes]
        # Empirical scaling to a 0-100-style range — TODO confirm the 1500 factor.
        scores = [min(ranking_score[i] * 1500, 100) for i in sorted_indexes]
        return docs, scores


def get_embeddings(model_name: str, embedding_device: str) -> Embeddings:
    """Return the embeddings implementation selected by *model_name*.

    Args:
        model_name: "luotuo", "rocketqa", or a key of ``embedding_model_dict``.
        embedding_device: Device string forwarded via ``model_kwargs``.

    Returns:
        An ``Embeddings`` instance for the requested backend.
    """
    device_kwargs = {"device": embedding_device}

    if model_name == "luotuo":
        return LuotuoEmbeddings(model_name=model_name, model_kwargs=device_kwargs)

    if model_name == "rocketqa":
        return RocketQAEmbeddings(model_name=model_name, model_kwargs=device_kwargs)

    # Default: resolve the HuggingFace model path from the project config map.
    return MyEmbeddings(
        model_name=embedding_model_dict[model_name],
        model_kwargs=device_kwargs,
    )
