import sys
sys.path.append('../')
import os
from tqdm import tqdm
import numpy as np
import random

from ref_free_metrics.pseudo_ref_builder import *
from resources import BASE_DIR
from .utils import parse_refs, parse_docs, get_sim_metric



# build pseudo references; the selected sentences will have non-zero weights
# `metric` names the strategy used to build the pseudo reference: given info for
# every sentence in the source documents, return one weight per sentence
def get_weights(sent_info_dic:dict, sent_vecs:list, metric:str) -> list:
    """Build pseudo-reference sentence weights for the given strategy.

    Args:
        sent_info_dic: dict keyed by consecutive int sentence ids (0..n-1),
            one entry per source-document sentence.
        sent_vecs: sentence embeddings; may be None for strategies that do
            not need them (e.g. 'full_doc', 'random').
        metric: strategy name, e.g. 'full_doc', 'random[_N]', 'topN[_thres]',
            'indep_graph_num..._[extra..._amp]', 'global_graph_...',
            'indep_cluster', 'global_cluster', 'simmax_X'.

    Returns:
        A list of floats, one weight per sentence; sentences selected for the
        pseudo reference get non-zero weights.

    Raises:
        ValueError: if `metric` matches no known strategy (previously this
            fell through to an UnboundLocalError on `weights`).
    """
    # use full source docs as the pseudo ref
    if metric == 'full_doc':
        weights = [1.]*len(sent_info_dic)
    # randomly extract N sentences as the pseudo ref ('random' or 'random_N')
    elif metric.startswith('random'):
        # by default we randomly select 10 sents as the pseudo-ref
        ref_length = int(metric.split('_')[1]) if '_' in metric else 10
        ridx = np.arange(len(sent_info_dic))
        np.random.shuffle(ridx)
        # set gives O(1) membership tests instead of scanning the array slice
        chosen = set(ridx[:ref_length].tolist())
        weights = [1. if i in chosen else 0. for i in range(len(ridx))]
    # extract top N sentences as the pseudo ref ('topN' or 'topN_thres')
    elif metric.startswith('top'):
        if '_' in metric:
            topn = int(metric.split('_')[0][3:])
            thres = float(metric.split('_')[1])
        else:
            topn = int(metric[3:])
            thres = -1
        # one weight per sentence in sent_info_dic
        weights = get_top_weights(sent_info_dic, topn)
        if thres > 0:
            # also give weight to non-top sentences whose similarity to the
            # selected ones exceeds `thres` (mutates `weights` in place)
            get_other_weights(sent_vecs, sent_info_dic, weights, thres)
    # SBert based LexRank, SLR in the paper
    elif metric.startswith('indep_graph') or metric.startswith('global_graph'):
        eles = metric.split('_')
        num = int(eles[2][3:])
        if 'extra' in metric:
            assert len(eles) == 5
            top_n = int(eles[3][5:])
            extra_amp = float(eles[-1])
        else:
            extra_amp = None
            top_n = None
        if 'indep' in metric:
            weights = get_indep_graph_weights(sent_info_dic, sent_vecs, num, top_n, extra_amp)
        else:
            weights = get_global_graph_weights(sent_info_dic, sent_vecs, num, top_n, extra_amp)
    # SBert-based cluster, global version (use all sents from all source docs to build a graph); SC_{G} in the paper
    elif metric.startswith('global_cluster'):
        weights = get_global_cluster_weights(sent_vecs)
    # SBert-based cluster, independent version (use sents from each source doc to build a graph); SC_{I} in the paper
    elif metric.startswith('indep_cluster'):
        weights = get_indep_cluster_weights(sent_info_dic, sent_vecs)
    elif metric.startswith('simmax'):
        simmax = float(metric.split('_')[1])
        weights = get_top_sim_weights(sent_info_dic, sent_vecs, simmax)
    else:
        raise ValueError('unrecognised pseudo-ref metric: {!r}'.format(metric))
    return weights


# returns: per-sentence info for the documents, their encodings (non-None only
# when a BERT model is supplied), and one weight per sentence
def parse_documents(docs, bert_model, ref_metric, debug=False):
    """Split `docs` into sentences and build pseudo-reference weights.

    If `ref_metric` is 'true_ref', the golden references are used as the
    pseudo reference (the upper-bound case) and every sentence gets weight 1;
    otherwise the strategy named by `ref_metric` assigns the weights.

    Returns a tuple (sent_info_dic, sent_vecs, sents_weights):
    per-sentence info dict keyed by int id, sentence encodings (None when no
    encoder model is used), and one weight per sentence.
    """
    use_golden_ref = (ref_metric == 'true_ref')
    if use_golden_ref:
        # golden refs as pseudo ref: all sentences fully weighted
        sent_info_dic, sent_vecs = parse_refs(docs, bert_model)
        sents_weights = [1.] * len(sent_info_dic)
    else:
        # build pseudo refs from the source docs via the chosen strategy
        sent_info_dic, sent_vecs = parse_docs(docs, bert_model)
        sents_weights = get_weights(sent_info_dic, sent_vecs, ref_metric)
    if debug:
        # sentences with weight > 0.1 form the pseudo reference shown here
        selected = [info['text'] for k, info in sent_info_dic.items()
                    if sents_weights[k] > 0.1]
        print('=====pseudo ref=====')
        print('\n'.join(selected))
    return sent_info_dic, sent_vecs, sents_weights



def get_scores(docs, summs, bert_model, ref_metric, sim_metric, debug=False):
    """Score summaries `summs` against pseudo references built from `docs`.

    Builds per-sentence pseudo-reference weights via `parse_documents` using
    the `ref_metric` strategy, then scores each summary with `sim_metric`.
    """
    sent_info_dic, sent_vecs, sents_weights = parse_documents(docs,bert_model,ref_metric,debug)
    # NOTE(review): `get_similarity_score` is not visibly imported in this
    # file (L10 imports `get_sim_metric`, which is unused); presumably it is
    # supplied by the star import of `pseudo_ref_builder` — verify, otherwise
    # this line raises NameError at runtime.
    pss = get_similarity_score(bert_model, summs, sents_weights, sent_vecs, sent_info_dic, sim_metric)
    return pss











