# -*- coding:utf8 -*-
# @Time : 2023/3/28 17:31
# @Author : WanJie Wu

import math
import warnings
import argparse
import numpy as np
from tqdm import tqdm
from typing import List
from functools import reduce
from collections import Counter
from sklearn.metrics import classification_report, precision_score, recall_score

# NOTE(review): PARSER is created but no arguments are added or parsed in this
# file view — presumably consumed by another module; confirm before removing.
PARSER = argparse.ArgumentParser()
# Globally silence warnings (e.g. sklearn's UndefinedMetricWarning during scoring).
warnings.filterwarnings("ignore")


def counter_matched_ngram(candidate_gram: List[str], ref_grams: List[List[str]]):
    """Count clipped n-gram matches between a candidate and several references.

    candidate_gram: n-grams of the predicted answer (C).
    ref_grams: n-gram lists for each reference answer S1..Sm.
    Returns the total match count, where each distinct n-gram's contribution
    is clipped by its maximum occurrence count over all references.
    """
    # Maximum occurrence count of each n-gram across all reference answers.
    max_ref_counts = Counter()
    for gram_list in ref_grams:
        for gram, cnt in Counter(gram_list).items():
            if cnt > max_ref_counts[gram]:
                max_ref_counts[gram] = cnt

    # Clipped overlap: min(count in candidate, max count over references).
    return sum(
        min(cnt, max_ref_counts[gram])
        for gram, cnt in Counter(candidate_gram).items()
    )


def generate_ngram(sequence: str, gram_size: int):
    """Slice *sequence* into character n-grams of width *gram_size*.

    A window of gram_size characters slides one position at a time. When the
    sequence is shorter than gram_size a single (truncated) gram is produced,
    so the returned list is never empty.
    """
    window_starts = max(len(sequence) - gram_size + 1, 1)
    return [sequence[start: start + gram_size] for start in range(window_starts)]


class RougeL(object):
    """ROUGE-L metric based on the Longest Common Subsequence (LCS)."""

    def __init__(self, candidate: str, refs: List[str], beta: float = 1.0):
        """
        candidate: predicted answer text.
        refs: list of reference answer texts.
        beta: weight of recall relative to precision in the F-score.
        """
        self.beta = beta
        self.candidate = candidate
        self.refs = refs
        self._candi_len = len(self.candidate)
        self.inst_scores = []  # kept for backward compatibility (not used here)

    def longest_common_subsequence(self, ref: str):
        """Return the LCS length between *ref* and the candidate (DP, O(m*n))."""
        len_ref = len(ref)
        dp = [[0] * (self._candi_len + 1) for _ in range(len_ref + 1)]
        for i in range(1, len_ref + 1):
            for j in range(1, self._candi_len + 1):
                if ref[i - 1] == self.candidate[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                else:
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])

        return dp[len_ref][self._candi_len]

    def rouge_l_score(self):
        """Compute the ROUGE-L F-score.

        Precision and recall are each the maximum over all references.
        Returns 0.0 when either is 0 (including an empty candidate or
        empty references).
        """
        precision, recall = 0.0, 0.0
        for ref in self.refs:
            basic_lcs = self.longest_common_subsequence(ref)
            tmp_p = basic_lcs / self._candi_len if self._candi_len else 0.0
            precision = max(precision, tmp_p)
            # BUGFIX: an empty reference previously raised ZeroDivisionError.
            tmp_r = basic_lcs / len(ref) if ref else 0.0
            recall = max(recall, tmp_r)

        if precision == 0.0 or recall == 0.0:
            return 0.0

        beta_sq = self.beta ** 2
        return ((1 + beta_sq) * precision * recall) / float(recall + beta_sq * precision)


class BLEU4(object):
    """Character-level BLEU score with n-grams up to *max_ngram* and a brevity penalty."""

    def __init__(self, candidate: str, refs: List[str], max_ngram: int = 4):
        """
        candidate: predicted text.
        refs: list of reference texts.
        max_ngram: largest n-gram order used.
        """
        self.max_ngram = max_ngram
        self.candidate = candidate
        self.refs = refs
        self.min_ngram = 1
        assert self.max_ngram > self.min_ngram
        self.prob_n = dict()
        self.bp = 0

    def ngram_probability(self):
        """Fill self.prob_n with the rooted modified precision of each n-gram order.

        The 1-gram precision reflects adequacy (faithfulness to the source);
        higher orders reflect fluency. Each precision is raised to
        1/max_ngram so the later product forms a geometric mean.
        """
        for gram_size in range(self.min_ngram, self.max_ngram + 1):
            candidate_ngram = generate_ngram(self.candidate, gram_size)
            refs_ngram = [generate_ngram(ref, gram_size) for ref in self.refs]
            matched_size = counter_matched_ngram(candidate_ngram, refs_ngram)
            # generate_ngram never returns an empty list, so this size is >= 1.
            candidate_size = len(candidate_ngram)
            self.prob_n[gram_size] = float(matched_size / candidate_size) ** (1 / float(self.max_ngram))

    def penalty_factor(self):
        """Compute the brevity penalty into self.bp."""
        lc = len(self.candidate)  # length of the predicted text
        if lc == 0:
            # BUGFIX: an empty candidate previously raised ZeroDivisionError;
            # an empty prediction earns a zero penalty factor (score 0).
            self.bp = 0.0
            return
        # Use the reference whose length is closest to the candidate's length.
        lr = min([(abs(lc - len(ref)), len(ref)) for ref in self.refs])[1]
        # exp(1 - lr/lc) when the candidate is shorter than the reference, else 1.
        self.bp = math.exp(min(1 - float(lr / lc), 0))

    def bleu_score(self):
        """Return bp * geometric mean of the n-gram precisions."""
        self.ngram_probability()
        self.penalty_factor()
        score = self.bp * reduce(
            lambda x, y: x * y, [self.prob_n[ngram_size] for ngram_size in range(self.min_ngram, self.max_ngram + 1)])
        return score


def du_reader_metrics(ground_truths, predictions, beta=1.0):
    """Evaluate span predictions: answerability classification plus BLEU-4/ROUGE-L.

    :param predictions: dict id -> list of candidates, e.g.
        "0000003": [{'score': 0.02, 'answer': '...', 'start': 1, 'end': 3}]
        Only the first candidate's "answer" field is read.
    :param ground_truths: dict id ->
        {"content": ..., "question": str, "original_answers": [(start, end)]}
    :param beta: recall weight for the ROUGE-L F-score (default 1.0).
    :return: dict with BLEU-4, ROUGE-L, recall, precision and a classification
        report for the has-answer / no-answer decision.
    """
    assert len(ground_truths) == len(predictions)
    truth_labels = []
    pred_labels = []
    bleu_scores, rouge_scores = [], []

    for data_id, real_data in tqdm(ground_truths.items(), desc="测试评估进度", total=len(ground_truths)):
        pred_data = predictions[data_id][0] if predictions[data_id] else None
        truth_labels.append(1 if real_data["original_answers"] else 0)
        pred_labels.append(1 if pred_data else 0)

        # Text metrics only apply when both gold and prediction have an answer.
        if not real_data["original_answers"] or not pred_data:
            continue

        real_answer = ["".join(real_data["content"])[answer[0]: answer[1]] for answer in real_data["original_answers"]]
        bleu_scores.append(BLEU4(pred_data["answer"], real_answer).bleu_score())
        rouge_scores.append(RougeL(pred_data["answer"], real_answer, beta).rouge_l_score())

    class_report = classification_report(
        y_true=truth_labels,
        y_pred=pred_labels,
        labels=[0, 1],
        target_names=["无答案", "有答案"],
        digits=3,
    )
    recall = recall_score(truth_labels, pred_labels)
    precision = precision_score(truth_labels, pred_labels)
    return {
        # BUGFIX: np.mean([]) is NaN (warning suppressed globally); report 0.0
        # when no example had both a gold answer and a prediction.
        "BLEU-4": np.around(np.mean(bleu_scores), 3) if bleu_scores else 0.0,
        "ROUGE-L": np.around(np.mean(rouge_scores), 3) if rouge_scores else 0.0,
        "recall": round(recall, 3),
        "precision": round(precision, 3),
        "classifyReport": class_report
    }
