import os.path
from typing import Dict, List
from FlagEmbedding import FlagModel
from gensim.models import TfidfModel
from gensim.corpora import Dictionary
from gensim.similarities import MatrixSimilarity
import numpy as np
from nltk.translate.meteor_score import meteor_score
from nltk.translate.bleu_score import sentence_bleu
from rouge_score import rouge_scorer
import textdistance
import bert_score
from gensim.models import KeyedVectors
from bleurt import score
import bleurt
import torch
import re
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModel, BertForMaskedLM
from rank_bm25 import BM25Okapi
from pyemd import emd
from FlagEmbedding import BGEM3FlagModel, FlagReranker, LLMEmbedder
from torch.nn import functional as F
from collections import defaultdict
from moverscore_v2 import word_mover_score

class Similarity:
    """Abstract base for pairwise text-similarity scorers.

    Subclasses implement ``similarity``; ``dot`` and ``sim_mat`` supply
    naive looping defaults that subclasses may override with batched
    implementations.
    """

    def similarity(self, sent1, sent2):
        """Return a similarity score for one sentence pair (override me)."""
        pass

    def dot(self, classics, community_text):
        """Score aligned (classic, community) pairs and return a 1-D array."""
        return np.array([self.similarity(classic, community)
                         for classic, community in zip(classics, community_text)])

    def sim_mat(self, sentences):
        """Return the full pairwise similarity matrix (n x n)."""
        return np.array([[self.similarity(left, right) for right in sentences]
                         for left in sentences])


class BGEReranker(Similarity):
    """Cross-encoder similarity via FlagEmbedding's FlagReranker."""

    def __init__(self, path):
        self.model = FlagReranker(path, use_fp16=True)

    def similarity(self, sent1, sent2):
        """Return the normalized reranker score for one sentence pair."""
        score = self.model.compute_score([sent1, sent2], normalize=True)
        return score

    def sim_mat(self, sentences):
        """Return the symmetric pairwise score matrix.

        Bug fix: the original allocated the matrix with ``[[-1] * n] * n``,
        which aliases every row to the same list, so each write clobbered
        the whole column in all rows and the ``== -1`` memoization check
        never worked.  Independent rows are allocated here and the reranker
        is invoked once per unordered pair, mirroring the result.
        """
        n = len(sentences)
        sim = [[-1] * n for _ in range(n)]
        for i in range(n):
            for j in range(i, n):
                score = self.similarity(sentences[i], sentences[j])
                sim[i][j] = score
                sim[j][i] = score
        return np.array(sim)

    def dot(self, classics, community_text):
        """Score aligned (classic, community) pairs into a 1-D array."""
        sim = []
        for classic, community in zip(classics, community_text):
            sim.append(self.similarity(classic, community))
        return np.array(sim)


class LLMEmbed(Similarity):
    """Dense similarity using FlagEmbedding's LLMEmbedder with the
    'convsearch' task instruction."""

    def __init__(self, path):
        self.model = LLMEmbedder(path, use_fp16=True)
        self.task = "convsearch"

    def sim_mat(self, sentences):
        """Encode all sentences as keys and return their Gram matrix."""
        keys = self.model.encode_keys(sentences, task=self.task)
        return keys @ keys.T

    def dot(self, classics, community_text):
        """Row-wise dot products between query and key embeddings."""
        queries = self.model.encode_queries(classics, task=self.task)
        keys = self.model.encode_keys(community_text, task=self.task)
        return np.sum(queries * keys, -1, keepdims=False)


class BGEm3(Similarity):
    """Similarity backed by BGE-M3, supporting dense, sparse (lexical),
    ColBERT multi-vector, and weighted-combination scoring.

    :param path: model path passed to ``BGEM3FlagModel``.
    :param encoding_method: 'dense', 'sparse', 'colbert'; any other value
        selects the weighted combination of all three modes.
    :param weights: weights for the combined mode (dense, sparse, colbert).
    """

    def __init__(self, path, encoding_method, weights=(0.4, 0.2, 0.4)):
        self.model = BGEM3FlagModel(path, use_fp16=True)
        self.encoding_method = encoding_method
        # Copy into a list: avoids the shared-mutable-default pitfall and
        # matches the list the FlagEmbedding API receives downstream.
        self.weights = list(weights)

    def similarity(self, sent1, sent2):
        """Combined BGE-M3 score for one sentence pair.

        Bug fix: the original assigned the score to a local and returned
        None; it also hard-coded the mode weights instead of using
        ``self.weights`` as ``dot`` does.
        """
        return self.model.compute_score([(sent1, sent2)],
                                        weights_for_different_modes=self.weights)

    def encode(self, sentences):
        """Return dense, sparse and ColBERT encodings for ``sentences``.

        Bug fix: the original dropped the result and returned None.
        """
        return self.model.encode(sentences, return_dense=True,
                                 return_sparse=True, return_colbert_vecs=True)

    def sparse_score(self, encodings_1, encodings_2):
        """Lexical-matching score between two sparse encodings."""
        return self.model.compute_lexical_matching_score(
            encodings_1['lexical_weights'], encodings_2['lexical_weights'])

    @staticmethod
    def sparse_sim_score(weight1, weight2):
        """Hand-rolled sparse overlap: sum of max weights of shared tokens."""
        score = 0
        for token in set(weight1.keys()).intersection(weight2.keys()):
            score += max(weight1[token], weight2[token])
        return score

    def sim_mat(self, sentences):
        """Pairwise similarity matrix under the configured encoding method.

        Bug fix: the sparse and colbert branches built the matrix with
        ``[[-1] * n] * n``, which aliases all rows to one list, so every
        row ended up identical to the last row written.  The combined
        branch also encoded the corpus twice; the single full encoding is
        reused for the dense part now.
        """
        n = len(sentences)
        if self.encoding_method == 'dense':
            embeddings = self.model.encode(sentences)['dense_vecs']
            return embeddings @ embeddings.T
        elif self.encoding_method == 'sparse':
            weights = self.model.encode(sentences, return_dense=False,
                                        return_sparse=True,
                                        return_colbert_vecs=False)['lexical_weights']
            sim = [[self.model.compute_lexical_matching_score(weights[i], weights[j])
                    for j in range(n)] for i in range(n)]
            assert min([min(s) for s in sim]) >= 0
            return np.array(sim)
        elif self.encoding_method == 'colbert':
            embeddings = self.model.encode(sentences, return_dense=False,
                                           return_sparse=False,
                                           return_colbert_vecs=True)['colbert_vecs']
            # Clamp at 0 so the sanity assert below cannot fire on tiny
            # negative ColBERT scores.
            sim = [[max(self.model.colbert_score(embeddings[i], embeddings[j]), 0)
                    for j in range(n)] for i in range(n)]
            assert min([min(s) for s in sim]) >= 0, sim
            return np.array(sim)
        else:
            outputs = self.model.encode(sentences, return_dense=True,
                                        return_sparse=True, return_colbert_vecs=True)
            dense = outputs['dense_vecs']
            sim = (dense @ dense.T).tolist()
            for i in range(n):
                for j in range(n):
                    sparse = self.model.compute_lexical_matching_score(
                        outputs['lexical_weights'][i], outputs['lexical_weights'][j])
                    colbert = self.model.colbert_score(
                        outputs['colbert_vecs'][i], outputs['colbert_vecs'][j])
                    sim[i][j] = (sim[i][j] * self.weights[0]
                                 + sparse * self.weights[1]
                                 + colbert * self.weights[2])
            return np.array(sim)

    def dot(self, classics, community_text):
        """Element-wise scores for aligned (classic, community) pairs."""
        if self.encoding_method == 'dense':
            classic_mat = self.model.encode(classics)['dense_vecs']
            community_mat = self.model.encode(community_text)['dense_vecs']
            return np.sum(classic_mat * community_mat, -1, keepdims=False)
        elif self.encoding_method == 'sparse':
            classic_weights = self.model.encode(classics)['lexical_weights']
            community_weights = self.model.encode(community_text)['lexical_weights']
            sim = [self.model.compute_lexical_matching_score(cw, mw)
                   for cw, mw in zip(classic_weights, community_weights)]
            return np.array(sim)
        elif self.encoding_method == 'colbert':
            classic_vecs = self.model.encode(classics)['colbert_vecs']
            community_vecs = self.model.encode(community_text)['colbert_vecs']
            sim = [self.model.colbert_score(cv, mv)
                   for cv, mv in zip(classic_vecs, community_vecs)]
            return np.array(sim)
        else:
            sentence_pairs = list(zip(classics, community_text))
            return np.array(self.model.compute_score(
                sentence_pairs,
                weights_for_different_modes=self.weights)['colbert+sparse+dense'])


class BGE(Similarity):
    """Dense sentence similarity backed by a FlagModel (BGE) encoder."""

    def __init__(self, path='/home/liwei/bge'):
        # use_fp16=True trades a little accuracy for faster encoding.
        self.model = FlagModel(path,
                               query_instruction_for_retrieval="为这个句子生成表示以用于查找相关解释：",
                               use_fp16=True)

    def similarity(self, sent1, sent2):
        """Embedding dot product between two single sentences."""
        emb1 = self.model.encode([sent1])
        emb2 = self.model.encode([sent2])
        return emb1 @ emb2.T

    def dot(self, classics, community_text):
        """Row-wise dot products between aligned encodings."""
        classic_mat = self.model.encode(classics)
        community_mat = self.model.encode(community_text)
        return np.sum(classic_mat * community_mat, -1, keepdims=False)

    def encode(self, sentences: list):
        """Expose the underlying encoder's embedding matrix."""
        return self.model.encode(sentences)

    def sim_mat(self, sentences):
        """Gram matrix of all sentence embeddings."""
        mat = self.model.encode(sentences)
        return mat @ mat.T


class SentenceBert(Similarity):
    """Similarity via sentence-transformers embeddings."""

    def __init__(self, path='sentence-transformers/distiluse-base-multilingual-cased-v1'):
        self.model = SentenceTransformer(path)

    def dot(self, classics, community_text):
        """Aligned-pair dot products, rescaled from [-1, 1] into [0, 1]."""
        classic_mat = self.model.encode(classics)
        community_mat = self.model.encode(community_text)
        raw = np.sum(classic_mat * community_mat, -1, keepdims=False)
        return (raw + 1) / 2.

    def sim_mat(self, sentences):
        """Pairwise matrix using the model's own similarity operator."""
        embeddings = self.model.encode(sentences)
        return self.model.similarity(embeddings, embeddings).numpy()


class BertScore(Similarity):
    """Pairwise BERTScore-F1 similarity."""

    def __init__(self, model_type):
        self.model_type = model_type

    def similarity(self, sent1, sent2):
        """BERTScore F1 for a single sentence pair."""
        P, R, F1 = bert_score.score([sent1], [sent2], model_type=self.model_type,
                                    verbose=False, device="cuda:0")
        return F1.item()

    def sim_mat(self, sentences):
        """Symmetric pairwise F1 matrix, batching all i<j pairs in one call.

        Bug fixes: the original allocated the result with ``[[-1] * n] * n``,
        aliasing every row to the same list so each assignment clobbered the
        whole column across all rows; it also called bert_score with empty
        candidate/reference lists (and batch_size 0) when n <= 1.
        """
        n = len(sentences)
        cands, refs, id_map = [], [], []
        for i in range(n):
            for j in range(i + 1, n):
                cands.append(sentences[i])
                refs.append(sentences[j])
                id_map.append((i, j))
        # Diagonal is 1 by definition; off-diagonal filled from the batch below.
        sim = [[1 if i == j else -1 for j in range(n)] for i in range(n)]
        if id_map:
            P, R, F1 = bert_score.score(cands, refs, model_type=self.model_type,
                                        verbose=False, device="cuda:0",
                                        batch_size=min(32, len(id_map)))
            for (x, y), f1 in zip(id_map, F1):
                sim[x][y] = sim[y][x] = f1.item()
        return np.array(sim)


class TFIDF(Similarity):
    """TF-IDF cosine similarity over character tokens (gensim-based)."""

    def sim_mat(self, sentences):
        """Return the n x n TF-IDF cosine matrix over ``sentences``."""
        # Tokenize each sentence into its individual characters.
        sentences = [list(words) for words in sentences]
        dct = Dictionary(sentences)
        corpus = [dct.doc2bow(words) for words in sentences]
        model = TfidfModel(corpus)
        tfidf_corpus = model[corpus]
        index = MatrixSimilarity(tfidf_corpus)
        # Query the index with every document to build the full matrix.
        sim = np.array([index[tfidf_sent] for tfidf_sent in tfidf_corpus])
        return sim

    def dot(self, classics, community_text):
        """Return the score of each community text against its aligned classic."""
        # Fit the dictionary and model over BOTH corpora so every token is known.
        sentences = [list(words) for words in classics + community_text]
        dct = Dictionary(sentences)
        corpus = [dct.doc2bow(words) for words in sentences]
        model = TfidfModel(corpus)
        classic_corpus = [dct.doc2bow(list(words)) for words in classics]
        community_corpus = [dct.doc2bow(list(words)) for words in community_text]
        tfidf_corpus = model[classic_corpus + community_corpus]
        index = MatrixSimilarity(tfidf_corpus)
        # Classics come first in the index and there are exactly as many of
        # them as community texts, so index row i corresponds to classic i.
        # The index is built over both corpora because it must contain every
        # token appearing in the queries, otherwise lookups would go out of
        # range.  (Translated from the original Chinese comment.)
        sim = np.array([index[tfidf_sent][i] for i, tfidf_sent in enumerate(community_corpus)])
        return sim


class Rouge(Similarity):
    """Average of ROUGE-1 and ROUGE-2 F1 over space-joined character tokens."""

    def __init__(self):
        # Fix: the original also requested 'rougeL' from the scorer but
        # never used it, paying for an LCS computation on every call.
        self.scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2'], use_stemmer=False)

    def similarity(self, s1, s2):
        """Mean F1 of ROUGE-1 and ROUGE-2 between char-tokenized inputs."""
        scores = self.scorer.score(' '.join(list(s1)), ' '.join(list(s2)))
        # Index by key rather than relying on dict value order.
        return (scores['rouge1'].fmeasure + scores['rouge2'].fmeasure) / 2.


class TextDistance(Similarity):
    """Similarity via the textdistance package ('lcs' or 'jaccard')."""

    def __init__(self, method):
        self.method = method

    def similarity(self, s1, s2):
        """Score one pair with the configured method.

        Raises ValueError for an unknown method — the original fell through
        with ``sim`` unbound and crashed with UnboundLocalError.
        """
        if self.method == "lcs":
            # Longest common substring length, normalized by the longer input.
            sim = textdistance.lcsstr.similarity(s1, s2)
            longer_length = max(len(s1), len(s2))
            if longer_length == 0:
                # Both inputs empty: avoid 0/0 and define the score as 0.
                print(s1, s2)
                return 0
            return sim / float(longer_length)
        if self.method == "jaccard":
            return textdistance.jaccard.similarity(set(list(s1)), set(list(s2)))
        raise ValueError(f"unknown method: {self.method!r}")


class SIMCSE(Similarity):
    """SimCSE-style similarity from a HuggingFace encoder's pooler output."""

    def __init__(self, path):
        self.tokenizer = AutoTokenizer.from_pretrained(os.path.join("./plms", path))
        self.model = AutoModel.from_pretrained(os.path.join("./plms", path))

    def encode(self, text):
        """Tokenize and run the encoder, returning the pooled vectors."""
        batch = self.tokenizer(text, truncation=True, padding=True,
                               return_tensors='pt', add_special_tokens=True)
        return self.model(**batch).pooler_output

    def sim_mat(self, sentences):
        """Cosine Gram matrix rescaled from [-1, 1] into [0, 1]."""
        normed = F.normalize(self.encode(sentences), dim=1)
        sim = ((normed @ normed.T + 1) / 2.).detach().cpu().numpy()
        assert np.min(sim) >= 0
        return sim

    def dot(self, classics, community_text):
        """Aligned-pair cosine similarity rescaled into [0, 1]."""
        classic_vecs = self.encode(classics)
        community_vecs = self.encode(community_text)
        cosine = F.cosine_similarity(classic_vecs, community_vecs)
        return ((cosine + 1) / 2.).detach().cpu().numpy()


class MyReferenceSimilarity(Similarity):
    """Hand-rolled reference similarity: blends a pooled-embedding cosine
    score with BERT-vector-verified n-gram overlap between two sentences."""

    def __init__(self, path):
        # Tokenizer/encoder are loaded from a hard-coded local plms directory.
        self.tokenizer = AutoTokenizer.from_pretrained(os.path.join("/home/liwei/zhuangzi-convergence/plms", path))
        self.model = AutoModel.from_pretrained(os.path.join("/home/liwei/zhuangzi-convergence/plms", path))
        self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        self.model.to(self.device)

    @staticmethod
    def join(lists):
        """Flatten a list of lists into one list."""
        result = []
        for l in lists:
            result.extend(l)
        return result

    @staticmethod
    def ngram(sentence, n):
        """Return all contiguous length-n slices of ``sentence``."""
        return [sentence[i:i + n] for i in range(len(sentence) - n + 1)]

    @staticmethod
    def replace_punctuation(sentence):
        """Replace listed (mostly CJK) punctuation with spaces so that
        n-grams never span a punctuation mark."""
        pattern = re.compile(u'[，；、：,;《》]+')
        return re.sub(pattern, ' ', sentence)

    def encode(self, text):
        """Encode one sentence.

        Returns (pooled sentence vector, per-token last-layer vectors).
        The token vectors drop position 0 — presumably the [CLS] token;
        TODO confirm against the tokenizer's special-token layout.
        """
        encodings = self.tokenizer(text, truncation=False, padding=False, return_tensors='pt', add_special_tokens=True)
        encodings = {key: value.to(self.device) for key, value in encodings.items()}
        outputs = self.model(**encodings)
        pooled_outputs = outputs.pooler_output.squeeze(0)
        last_layer_outputs = outputs.last_hidden_state[0, 1:].squeeze(0)
        return pooled_outputs, last_layer_outputs

    def similarity(self, sent1, sent2):
        """Blend of the pooled-vector cosine (shifted into [0, 1]) and a
        0.2/0.3/0.5-weighted bi/tri/quad-gram BERT-match score halved.
        Note the two parts can sum to more than 1."""
        sent1_sentence_vector, sent1_last_layer_vector = self.encode(sent1)
        sent2_sentence_vector, sent2_last_layer_vector = self.encode(sent2)
        # Cosine in [-1, 1] shifted into [0, 1].
        cls_score = ((torch.nn.functional.cosine_similarity(sent1_sentence_vector,
                                                            sent2_sentence_vector,
                                                            dim=0) + 1) / 2.).detach().cpu().numpy()
        sent1_unigrams, sent1_bigrams, sent1_trigrams, sent1_quadgrams = self.get_ngrams(sent1)

        sent2_unigrams, sent2_bigrams, sent2_trigrams, sent2_quadgrams = self.get_ngrams(sent2)
        bigram_bert_match_score = self.sequence_match_bert(sent1_bigrams, sent2_bigrams, sent1_last_layer_vector,
                                                           sent2_last_layer_vector)
        trigram_bert_match_score = self.sequence_match_bert(sent1_trigrams, sent2_trigrams, sent1_last_layer_vector,
                                                            sent2_last_layer_vector)
        quadgram_bert_match_score = self.sequence_match_bert(sent1_quadgrams, sent2_quadgrams, sent1_last_layer_vector,
                                                             sent2_last_layer_vector)
        # Longer shared n-grams are weighted more heavily.
        return (
                0.2 * bigram_bert_match_score + 0.3 * trigram_bert_match_score + 0.5 * quadgram_bert_match_score) * 0.5 + cls_score

    def get_ngrams(self, sentence):
        """Return (unigrams, bigrams, trigrams, quadgrams), each built per
        punctuation-free segment so no n-gram crosses punctuation."""
        clean_text = self.replace_punctuation(sentence)
        unigrams = MyReferenceSimilarity.join([MyReferenceSimilarity.ngram(s, 1) for s in clean_text.split(' ')])
        bigrams = self.join([self.ngram(s, 2) for s in clean_text.split(' ')])
        trigrams = self.join([self.ngram(s, 3) for s in clean_text.split(' ')])
        quadgrams = self.join([self.ngram(s, 4) for s in clean_text.split(' ')])
        return unigrams, bigrams, trigrams, quadgrams

    @staticmethod
    def sequence_match_bert(candidate, reference, candidate_bert, reference_bert):
        """Score shared n-grams by cosine of their concatenated token vectors,
        normalized by the number of distinct reference n-grams and capped at 1.

        Only the FIRST occurrence of each shared n-gram on each side is
        compared.  NOTE(review): the position in the n-gram list is used
        directly as a token offset into the BERT vectors, which only lines
        up when the text is a single unpunctuated segment and tokenization
        is per-character — verify against the tokenizer.
        """

        candidate_set = set(candidate)
        reference_set = set(reference)
        overlap = candidate_set.intersection(reference_set)
        if len(overlap) == 0:
            return 0
        score = 0
        for ngram in overlap:
            c_index = candidate.index(ngram)
            r_index = reference.index(ngram)
            c_vector = candidate_bert[c_index:c_index + len(ngram)].view(-1)
            r_vector = reference_bert[r_index:r_index + len(ngram)].view(-1)
            # Skip pairs whose flattened vectors differ in length
            # (e.g. a slice truncated at the end of the sequence).
            if c_vector.shape != r_vector.shape:
                continue
            score += ((torch.nn.functional.cosine_similarity(c_vector, r_vector, dim=0) + 1) / 2.).item()
        return min(score / float(len(reference_set)), 1)


class NLTK(Similarity):
    """NLTK lexical metrics over character tokens ('bleu' or 'meteor')."""

    def __init__(self, method):
        self.method = method

    def similarity(self, s1, s2):
        """Score one pair with the configured metric.

        Raises ValueError for an unknown method — the original fell
        through and crashed with UnboundLocalError on the return.
        """
        if self.method == 'bleu':
            return sentence_bleu([list(s1)], list(s2))
        if self.method == 'meteor':
            return meteor_score([list(s1)], list(s2))
        raise ValueError(f"unknown method: {self.method!r}")


class WordMover(Similarity):
    """Word Mover's Distance similarity over word2vec vectors."""

    def __init__(self, path):
        self.model = KeyedVectors.load_word2vec_format(path, binary=False)

    def similarity(self, s1, s2):
        """Map WMD into (0, 1]: distance 0 gives similarity 1."""
        return 1. / (1 + self.word_mover_distance(list(s1), list(s2)))

    @staticmethod
    def pad_sentence(sentence, target_length, padding_vector):
        """Right-pad a list of vectors with ``padding_vector`` up to
        ``target_length``; returns the list unchanged if already long enough."""
        shortfall = target_length - len(sentence)
        if shortfall > 0:
            return sentence + [padding_vector] * shortfall
        return sentence

    def word_mover_distance(self, words1, words2):
        """Earth Mover's Distance between the two token lists.

        Tokens missing from the vector model are dropped; if either side has
        no in-vocabulary token, the distance is infinite.
        """
        # Look up vectors, silently skipping out-of-vocabulary tokens.
        vectors1 = [np.array(self.model[word], dtype=np.float64)
                    for word in words1 if word in self.model]
        vectors2 = [np.array(self.model[word], dtype=np.float64)
                    for word in words2 if word in self.model]
        long_length = max(len(words1), len(words2))

        # Both documents entirely out of vocabulary: infinitely far apart.
        if not vectors1 or not vectors2:
            return float('inf')

        # Uniform mass per surviving token, zero-padded to a common length.
        weight1 = np.array([1 / len(vectors1)] * len(vectors1) + [0] * (long_length - len(vectors1)),
                           dtype=np.float64)
        weight2 = np.array([1 / len(vectors2)] * len(vectors2) + [0] * (long_length - len(vectors2)),
                           dtype=np.float64)

        zero_vec = np.zeros(self.model.vector_size, dtype=np.float64)
        vectors1 = self.pad_sentence(vectors1, long_length, zero_vec)
        vectors2 = self.pad_sentence(vectors2, long_length, zero_vec)
        # Squared-euclidean ground distance between every vector pair.
        distance_matrix = np.array([[np.sum((v1 - v2) ** 2) for v2 in vectors2]
                                    for v1 in vectors1])

        # Solve the transport problem with pyemd.
        return emd(weight1, weight2, distance_matrix)


class MoverScore(Similarity):
    """MoverScore similarity (moverscore_v2) with uniform IDF weights."""

    def similarity(self, s1, s2):
        pass

    def sim_mat(self, sentences):
        """Score every sentence (as hypothesis) against all sentences."""
        idf_dict_hyp = defaultdict(lambda: 1.)
        idf_dict_ref = defaultdict(lambda: 1.)
        rows = []
        for sent in sentences:
            # One row per sentence: the sentence repeated against the corpus.
            hypotheses = [sent] * len(sentences)
            rows.append(word_mover_score(sentences, hypotheses, idf_dict_ref,
                                         idf_dict_hyp, stop_words=[], n_gram=1,
                                         remove_subwords=False))
        return np.array(rows)


class BleuRT(Similarity):
    """BLEURT learned-metric similarity."""

    def __init__(self, checkpoint):
        self.model = bleurt.score.BleurtScorer(checkpoint)

    def similarity(self, s1, s2):
        """Score candidate ``s2`` against reference ``s1`` with BLEURT."""
        result = self.model.score(references=[s1], candidates=[s2])
        assert isinstance(result, list) and len(result) == 1
        return result[0]


class MyBM25(Similarity):
    """BM25 similarity over character tokens (rank_bm25)."""

    def similarity(self, s1, s2):
        """BM25 score of query ``s2`` against the single document ``s1``.

        Bug fix: the original passed ``list(s1)`` as the corpus — making
        every single character its own document — and wrapped the query
        tokens in an extra list, which crashes inside rank_bm25 (a list is
        not hashable as a token).  The corpus must be a list of token
        lists and the query a flat token list, matching ``sim_mat`` below.
        """
        return BM25Okapi([list(s1)]).get_scores(list(s2))[0]

    def sim_mat(self, sentences):
        """Score every sentence as a query against the whole corpus."""
        corpus = [list(s) for s in sentences]
        bm25_instance = BM25Okapi(corpus)
        return np.array([bm25_instance.get_scores(query) for query in corpus])


def sim_model_dot(sim_model: Dict[str, Similarity], classics, community_text):
    """Sum the aligned-pair scores of every model in ``sim_model``.

    Returns None when ``sim_model`` is empty.
    """
    total = None
    for scorer in sim_model.values():
        partial = scorer.dot(classics, community_text)
        if total is None:
            total = partial
        else:
            total += partial
    return total


def sim_model_sim_mat(sim_model: Dict[str, Similarity], sentences):
    """Sum the pairwise similarity matrices of every model in ``sim_model``.

    Returns None when ``sim_model`` is empty.
    """
    total = None
    for scorer in sim_model.values():
        partial = scorer.sim_mat(sentences)
        if total is None:
            total = partial
        else:
            total += partial
    return total


if __name__ == "__main__":
    # Smoke-test the rouge_scorer API on a classic example pair.
    demo_scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
    demo_scores = demo_scorer.score(
        'The quick brown fox jumps over the lazy dog',
        'The quick brown dog jumps on the log.')
    print(float(demo_scores['rouge1'].fmeasure))
