import numpy as np
import nltk
from nltk.translate.bleu_score import sentence_bleu as bleu
# NOTE: predicted keyphrases must be de-duplicated before matching/scoring.

def compute_match_scores(pred_seqs, tgt_seqs):
    """
    Exact-match each predicted keyphrase against the ground-truth keyphrases.

    :param pred_seqs: list of (de-duplicated) predicted keyphrases; each element
                      is a sequence (typically a stemmed string or token list)
    :param tgt_seqs: list of ground-truth keyphrases, same element type
    :return: float32 numpy array of shape (len(pred_seqs),) holding 1.0 where
             the prediction exactly matches some target, else 0.0
    """
    # Removed dead code from the original: `metric_dict` (never returned or
    # used) and the redundant re-zeroing of match_score inside the loop.
    match_score = np.zeros(shape=(len(pred_seqs),), dtype='float32')

    for pred_id, pred_seq in enumerate(pred_seqs):
        for true_seq in tgt_seqs:
            # A prediction matches only when it has the same length as the
            # target and every element agrees.
            if len(pred_seq) == len(true_seq) and all(
                    p == t for p, t in zip(pred_seq, true_seq)):
                match_score[pred_id] = 1
                break

    return match_score

def stemming_str(text):
    """
    Porter-stem every whitespace-separated token of *text*.

    :param text: input string (renamed from ``str`` — the original parameter
                 shadowed the builtin)
    :return: the stemmed tokens re-joined with single spaces
    """
    stemmer = nltk.stem.PorterStemmer()
    stemmed_terms = [stemmer.stem(term) for term in text.split()]
    return ' '.join(stemmed_terms).strip()

def compute_metrics_one(pred_seq, tgt_seq, score_names, topk_range, do_lower=True, do_stem=True):
    """
    Score one prediction against its ground truth at several top-k cutoffs.

    score_names and topk_range only define the key names used in score_dict;
    the four scores correct/precision/recall/f_score are always computed on
    the non-empty-target path.

    :param pred_seq: predicted keyphrases — a list of strings or a single
                     ';'-separated string
    :param tgt_seq: ground-truth keyphrases, same accepted formats
    :param score_names: score-name labels used to build the keys for the
                        empty-target early return
    :param topk_range: iterable of cutoffs; each is an int n, 'k' (= number
                       of targets) or 'M' (= number of unique predictions)
    :param do_lower: lowercase every phrase before comparing
    :param do_stem: Porter-stem every phrase (requires nltk)
    :return: (score_dict, len(pred_list_k), len(tgt_list)) — always a 3-tuple
             (the original returned a bare dict when tgt_list was empty, which
             crashed callers that unpack three values)
    """
    pred_list = _normalize_phrases(pred_seq, do_lower, do_stem)
    tgt_list = _normalize_phrases(tgt_seq, do_lower, do_stem)

    # Remove duplicate predicted phrases, keeping first-seen order.
    unique_pred_list = list(dict.fromkeys(pred_list))

    score_dict = {}
    # No targets: every score is 0.0. Return early, keeping the 3-tuple
    # return shape so callers can always unpack.
    if not tgt_list:
        for topk in topk_range:
            for score_name in score_names:
                score_dict['{}@{}'.format(score_name, topk)] = 0.0
        return score_dict, len(unique_pred_list), 0

    # Exact-match flags for the (untruncated) unique predictions.
    match_list = compute_match_scores(unique_pred_list, tgt_list)

    pred_list_k = unique_pred_list  # fallback in case topk_range is empty
    for topk in topk_range:
        if topk == 'k':
            cutoff = len(tgt_list)
        elif topk == 'M':
            cutoff = len(unique_pred_list)
        else:
            cutoff = topk
        # P@n, R@n, F@n over the first `cutoff` unique predictions.
        pred_list_k = unique_pred_list[:cutoff]
        match_list_k = match_list[:cutoff]

        correct_num = int(sum(match_list_k))
        micro_p = float(sum(match_list_k)) / len(pred_list_k) if pred_list_k else 0.0
        micro_r = float(sum(match_list_k)) / len(tgt_list)
        if micro_p + micro_r > 0:
            micro_f1 = 2 * micro_p * micro_r / (micro_p + micro_r)
        else:
            micro_f1 = 0.0

        for score_name, v in zip(['correct', 'precision', 'recall', 'f_score'],
                                 [correct_num, micro_p, micro_r, micro_f1]):
            score_dict['{}@{}'.format(score_name, topk)] = v

    return score_dict, len(pred_list_k), len(tgt_list)


def _normalize_phrases(seq, do_lower, do_stem):
    """
    Turn a ';'-joined string or a list of strings into a clean phrase list.

    The original inline version left the list unbound when do_stem was False
    (UnboundLocalError); here phrases are always built, and merely stripped
    when stemming is disabled.
    """
    if isinstance(seq, str):
        phrases = seq.split(';')
    else:
        phrases = [element for element in seq if isinstance(element, str)]
    if do_lower:
        phrases = [kp.lower() for kp in phrases]
    if do_stem:
        phrases = [stemming_str(kp.strip()) for kp in phrases]
    else:
        phrases = [kp.strip() for kp in phrases]
    return phrases

def macro_metric(tgts, preds, k):
    """
    Macro-average keyphrase metrics over a corpus.

    :param tgts: iterable of ground-truth keyphrase sequences, one per document
    :param preds: iterable of predicted keyphrase sequences, aligned with tgts
    :param k: topk_range forwarded to compute_metrics_one (e.g. [5, 'M'])
    :return: dict mapping 'metric@topk' to the plain-float average over documents
    """
    import torch
    from collections import defaultdict

    def denumpify_detensorize(metrics):
        """Recursively call .item() on numpy scalars / 1-element tensors."""
        if isinstance(metrics, (list, tuple)):
            return type(metrics)(denumpify_detensorize(m) for m in metrics)
        if isinstance(metrics, dict):
            # Comprehension variables renamed so they no longer shadow the
            # enclosing parameter `k`.
            return type(metrics)({key: denumpify_detensorize(val)
                                  for key, val in metrics.items()})
        if isinstance(metrics, np.generic):
            return metrics.item()
        if isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
            return metrics.item()
        return metrics

    score_names = ['correct', 'precision', 'recall', 'f_score']
    score_results = defaultdict(list)
    for pred, tgt in zip(preds, tgts):
        # Fix: the old loop read scores['correct@5'] unconditionally, which
        # raised KeyError whenever 5 was not in the topk range. The tp /
        # len_pred / len_tgt accumulators fed only commented-out code, so
        # they are removed.
        scores, _, _ = compute_metrics_one(pred, tgt, score_names, k)
        for metric, score in scores.items():
            score_results[metric].append(score)

    metric_result = {metric: np.average(score_list)
                     for metric, score_list in score_results.items()}
    return denumpify_detensorize(metric_result)


if __name__ == '__main__':
    # Smoke test with tiny example data. (The previous code called
    # macro_metric() without its required arguments, which always raised
    # TypeError.)
    example_tgts = [['machine learning', 'deep learning'], ['information retrieval']]
    example_preds = [['machine learning', 'transformers'], ['search engines']]
    print(macro_metric(example_tgts, example_preds, [5, 'M']))


        
               
