import sys
sys.path.append('../')

import numpy as np
from sentence_transformers import SentenceTransformer
from nltk.stem import PorterStemmer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize
import copy

from resources import BASE_DIR, LANGUAGE
from ref_free_metrics.similarity_scorer import parse_documents

class Supert():
    """SUPERT-style reference-free summary evaluator.

    On construction it sentence-splits the input documents, embeds every
    sentence at token level with SBERT, and selects high-weight sentences to
    act as a pseudo-reference summary.  Calling the instance with candidate
    summaries returns one score per summary, computed by comparing summary
    token vectors against the pseudo-reference (currently via WCD_score;
    get_sbert_score is the token-cosine alternative).

    NOTE(review): relies on a SentenceTransformer variant whose encode()
    accepts token_vecs=True and returns per-token vectors — confirm the
    installed package provides this.
    """

    def __init__(self, docs, ref_metric='top15', sim_metric='f1'):
        """
        Args:
            docs: documents accepted by parse_documents (document paths plus
                sentence-split text).
            ref_metric: sentence-selection strategy forwarded to
                parse_documents (e.g. 'top15').
            sim_metric: string containing 'recall', 'precision' or 'f1';
                only consulted by get_sbert_score.
        """
        self.bert_model = SentenceTransformer('bert-large-nli-stsb-mean-tokens')
        self.sim_metric = sim_metric

        # Sentence-split the documents and assign a weight to each sentence
        # (no BERT needed for this step).
        self.sent_info_dic, _, self.sents_weights = parse_documents(docs, None, ref_metric)
        # Token-level embeddings for every source sentence.
        self.all_token_vecs, self.all_tokens = self.get_all_token_vecs(self.bert_model, self.sent_info_dic)
        # Pseudo-reference token vectors/tokens (stop-words removed).
        self.ref_vecs, self.ref_tokens = self.build_pseudo_ref(ref_metric)

    def get_all_token_vecs(self, model, sent_info_dict):
        """Encode every sentence in sent_info_dict at token level.

        Returns:
            (all_vecs, all_tokens): per-sentence token-vector matrices and the
            matching token strings (these include punctuation and the
            [CLS]/[SEP] markers).
        """
        all_sents = [sent_info_dict[i]['text'] for i in sent_info_dict]
        all_vecs, all_tokens = model.encode(all_sents, token_vecs=True)
        # Sanity check: one vector per token, one token list per sentence.
        assert len(all_vecs) == len(all_tokens)
        for vecs, tokens in zip(all_vecs, all_tokens):
            assert len(vecs) == len(tokens)
        return all_vecs, all_tokens

    def build_pseudo_ref(self, ref_metric):
        """Build the pseudo-reference(s) from high-weight sentences.

        Sentences with weight >= 0.1 are kept.  When at least 15 survive,
        sentences are grouped per source document into several
        pseudo-references; otherwise a single pseudo-reference is built.
        (ref_metric is currently unused here — selection already happened in
        parse_documents.)

        Returns:
            (ref_vecs, ref_tokens): one stop-word-free token matrix and token
            array per pseudo-reference.
        """
        # Keep sentences whose weight passes the threshold; keys match
        # self.sent_info_dic.
        ref_dic = {k: v for k, v in self.sent_info_dic.items()
                   if self.sents_weights[k] >= 0.1}
        # Source documents the surviving sentences came from.
        ref_sources = set(v['doc'] for v in ref_dic.values())
        ref_idxs = []
        if len(ref_dic) >= 15:
            # Many sentences: one pseudo-reference per source document.
            for src in ref_sources:
                ref_idxs.append([k for k in ref_dic if ref_dic[k]['doc'] == src])
        else:
            ref_idxs.append(list(ref_dic))
        # Collect vectors and tokens of each pseudo-reference.
        ref_vecs, ref_tokens = [], []
        for idx_group in ref_idxs:
            vv, tt = self.kill_stopwords(idx_group, self.all_token_vecs, self.all_tokens)
            ref_vecs.append(vv)
            ref_tokens.append(tt)
        return ref_vecs, ref_tokens

    def __call__(self, summaries):
        """Score candidate summaries against the pseudo-reference(s).

        Args:
            summaries: either a list of summary strings, or a list of lists
                of sentence indices into the parsed documents.

        Returns:
            list of scores, one per summary (see WCD_score).
        """
        summ_vecs = []
        summ_tokens = []
        if isinstance(summaries[0], str):
            # Raw text: sentence-split, encode, strip stop-words.
            for summ in summaries:
                vv, tt = self.get_token_vecs(self.bert_model, sent_tokenize(summ))
                summ_vecs.append(vv)
                summ_tokens.append(tt)
        elif isinstance(summaries[0], list):
            # Sentence indices: reuse the pre-computed source embeddings.
            for summ in summaries:
                vv, tt = self.kill_stopwords(summ, self.all_token_vecs, self.all_tokens)
                summ_vecs.append(vv)
                summ_tokens.append(tt)
        else:
            print('INVALID INPUT SUMMARIES! Should be either a list of strings or a list of integers (indicating the sentence indices)')
            sys.exit()  # was bare exit(); sys.exit is the importable form
        # Pick the similarity measure here:
        # scores = self.get_sbert_score(self.ref_vecs, self.ref_tokens, summ_vecs, summ_tokens, self.sim_metric)
        scores = self.WCD_score(self.ref_vecs, self.ref_tokens, summ_vecs, summ_tokens, self.sim_metric)
        return scores

    def kill_stopwords(self, sent_idx, all_token_vecs, all_tokens):
        """Concatenate the token vectors/tokens of the given sentences and
        remove stop-words.

        Args:
            sent_idx: sequence of sentence keys/indices into all_token_vecs
                and all_tokens.

        Returns:
            (vecs, tokens) after stop-word removal, or (None, None) when
            sent_idx is empty (previously this raised NameError).
        """
        if len(sent_idx) == 0:
            return None, None
        for si in sent_idx:
            assert len(all_token_vecs[si]) == len(all_tokens[si])
        # One big (n_tokens, dim) matrix plus the flat token list.
        # (np.row_stack is deprecated; vstack is the supported spelling.)
        full_vec = np.vstack([all_token_vecs[si] for si in sent_idx])
        full_token = []
        for si in sent_idx:
            full_token.extend(all_tokens[si])
        return self._strip_stopwords(full_vec, full_token)

    def _strip_stopwords(self, token_vecs, tokens):
        """Drop stop-words and the [CLS]/[SEP] markers.

        Shared by kill_stopwords and get_token_vecs; uses a set for O(1)
        membership tests (the old list was O(n) per token).
        """
        drop = set(stopwords.words(LANGUAGE))
        drop.update(('[cls]', '[sep]'))
        keep = [j for j, tk in enumerate(tokens) if tk.lower() not in drop]
        return token_vecs[keep], np.array(tokens)[keep]

    def get_sbert_score(self, ref_token_vecs, ref_tokens, summ_token_vecs, summ_tokens, sim_metric):
        """Token-level cosine score between pseudo-references and summaries.

        For each (reference, summary) pair: recall is the mean over reference
        tokens of the best-matching summary-token similarity, precision the
        converse, f1 their harmonic mean.  Per-summary scores are averaged
        over all pseudo-references; empty summaries score None.

        Args:
            sim_metric: string containing 'recall', 'precision' or 'f1'.

        Returns:
            list with one float (or None) per summary.
        """
        recall_list = []
        precision_list = []
        f1_list = []
        empty_summs_ids = set()

        for rvecs in ref_token_vecs:
            r_recall, r_precision, r_f1 = [], [], []
            # Compare this pseudo-reference with every candidate summary.
            for j, svecs in enumerate(summ_token_vecs):
                if svecs is None:  # empty summary (see get_token_vecs/kill_stopwords)
                    empty_summs_ids.add(j)
                    r_recall.append(None)
                    r_precision.append(None)
                    r_f1.append(None)
                    continue
                # Pairwise token similarities; shapes may differ, which
                # cosine_similarity supports (result is n_ref x n_summ).
                sim_matrix = cosine_similarity(rvecs, svecs)
                recall = np.mean(np.max(sim_matrix, axis=1))
                precision = np.mean(np.max(sim_matrix, axis=0))
                denom = recall + precision
                # Guard the degenerate all-zero case (was 0/0 -> nan).
                f1 = 2. * recall * precision / denom if denom != 0 else 0.
                r_recall.append(recall)
                r_precision.append(precision)
                r_f1.append(f1)
            recall_list.append(r_recall)
            precision_list.append(r_precision)
            f1_list.append(r_f1)

        if 'recall' in sim_metric:
            table = np.array(recall_list)
        elif 'precision' in sim_metric:
            table = np.array(precision_list)
        else:
            assert 'f1' in sim_metric
            table = np.array(f1_list)
        # Average over pseudo-references; keep None for empty summaries.
        return [None if j in empty_summs_ids else np.mean(table[:, j])
                for j in range(len(summ_token_vecs))]

    # --- alternative scorers (WCD etc.) ---------------------------------
    def WCD_score(self, ref_token_vecs, ref_tokens, summ_token_vecs, summ_tokens, sim_metric):
        """Word-Centroid-Distance-style score (lower = more similar).

        For each (reference, summary) pair: build the merged vocabulary
        matrix X (one vector per unique token, via Get_wordlist) and a
        scalar weight per word — the mean of its token vector, 0 when the
        word is absent from that document — then compute
        || X^T (d_ref - d_summ) ||_2.  Per-summary scores are averaged over
        all pseudo-references.

        sim_metric is accepted only for interface parity with
        get_sbert_score and is ignored here.
        """
        all_rows = []
        for rvecs, rtoks in zip(ref_token_vecs, ref_tokens):
            # First occurrence of each token wins — same semantics as the
            # old list.index() lookup but without the O(n^2) cost.
            ref_map = {}
            for tok, vec in zip(rtoks, rvecs):
                ref_map.setdefault(tok, vec)
            row = []
            for svecs, stoks in zip(summ_token_vecs, summ_tokens):
                vocab, word_mat = self.Get_wordlist(rtoks, stoks, rvecs, svecs)
                summ_map = {}
                for tok, vec in zip(stoks, svecs):
                    summ_map.setdefault(tok, vec)
                # Scalar weight per vocabulary word (0 when absent).
                d_ref = np.array([np.mean(ref_map[w]) if w in ref_map else 0.
                                  for w in vocab])
                d_summ = np.array([np.mean(summ_map[w]) if w in summ_map else 0.
                                   for w in vocab])
                # L2 norm of the projected difference (replaces the manual
                # square/sum/sqrt loop).
                row.append(np.linalg.norm(word_mat.T @ (d_ref - d_summ)))
            all_rows.append(row)
        wcd = np.array(all_rows)  # shape: (n_refs, n_summaries)
        # Column means -> one averaged score per summary.
        return [np.mean(wcd[:, j]) for j in range(len(summ_token_vecs))]

    def Get_wordlist(self, doc1, doc2, rvecs, svecs):
        """Merged vocabulary of two token sequences.

        Returns:
            (word_list, word_mat): unique tokens in first-seen order (doc1
            before doc2), and the matrix whose i-th row is the vector of the
            first occurrence of word_list[i].

        Fixes the old version, which kept doc1 duplicates in the vocabulary
        and resolved every vector through list.index() (first occurrence
        only, O(n^2)).
        """
        word_list = []
        word_mat = []
        seen = set()
        for tokens, vecs in ((doc1, rvecs), (doc2, svecs)):
            for tok, vec in zip(tokens, vecs):
                if tok not in seen:
                    seen.add(tok)
                    word_list.append(tok)
                    word_mat.append(vec)
        return word_list, np.array(word_mat)

    def get_token_vecs(self, model, sents, remove_stopwords=True):
        """Encode a list of sentences and concatenate their token
        vectors/tokens, optionally stripping stop-words.

        Returns:
            (vecs, tokens), or (None, None) for an empty sentence list.
        """
        if len(sents) == 0:
            return None, None
        vecs, tokens = model.encode(sents, token_vecs=True)
        full_vec = np.vstack(vecs)  # row_stack is deprecated
        full_token = []
        for sent_tokens in tokens:
            full_token.extend(sent_tokens)
        if remove_stopwords:
            return self._strip_stopwords(full_vec, full_token)
        return full_vec, np.array(full_token)



