import pandas as pd
from nltk.translate.bleu_score import SmoothingFunction
from nltk.translate.bleu_score import sentence_bleu
# from rouge import Rouge
# from myrouge.rouge import Rouge
import matplotlib.pyplot as plt
from datasets import load_metric

# Module-level ROUGE metric, loaded once and shared by max_rouge /
# max_rouge_dedu below (avoids reloading the metric on every call).
metric = load_metric("rouge", seed=42)
def distinct(multi_hypotheses, n):
    """Return the distinct-n score: unique n-grams / total n-grams.

    Hypotheses are whitespace-tokenized; hypotheses shorter than n
    contribute no n-grams. Returns 0.0 when there are no n-grams at all.
    """
    unique_ngrams = set()
    total = 0
    for hypothesis in multi_hypotheses:
        tokens = hypothesis.split()
        for start in range(len(tokens) - n + 1):
            total += 1
            unique_ngrams.add(' '.join(tokens[start:start + n]))
    return len(unique_ngrams) / total if total > 0 else 0.0


def max_bleu(multi_hypotheses, reference):
    """Return the best sentence-level BLEU (smoothing method4) among
    the hypotheses, scored against a single reference string."""
    smoother = SmoothingFunction()

    ref_tokens = reference.split()

    best = 0.0
    for hypothesis in multi_hypotheses:
        candidate = hypothesis.split()
        score = sentence_bleu([ref_tokens], candidate,
                              smoothing_function=smoother.method4)
        best = max(best, score)
    return best


def max_bleu_dedu(multi_hypotheses, reference):
    """Return the best sentence-level BLEU (smoothing method4) among
    deduplicated hypotheses, scored against a single reference.

    Each non-empty hypothesis may carry a "[idx]" prefix (its index in
    the original beam list); the prefix is stripped before scoring.
    Note: strips the prefix in place on the caller's list, matching the
    original behavior.
    """
    cc = SmoothingFunction()
    # Drop the "[idx]" prefix. Guard with find() so a hypothesis without
    # a ']' no longer raises ValueError — consistent with max_rouge_dedu.
    for idx, hyp in enumerate(multi_hypotheses):
        if hyp == "":
            continue
        if hyp.find(']') != -1:
            multi_hypotheses[idx] = hyp[hyp.index(']') + 1:]
    multi_hypotheses = [hyp.split() for hyp in multi_hypotheses]
    ref = reference.split()

    max_score = 0.0
    for hyp in multi_hypotheses:
        score = sentence_bleu([ref], hyp, smoothing_function=cc.method4)
        if score >= max_score:
            max_score = score
    return max_score


def max_rouge(multi_hypotheses, reference):
    """Return the best ROUGE-L mid F-measure among the hypotheses,
    scored against a single reference (uses the module-level `metric`).

    Empty hypotheses are skipped; a failed metric computation scores 0.0
    (best-effort) instead of aborting the whole evaluation run.
    """
    max_score = 0.0
    for hyp in multi_hypotheses:
        if hyp == "":
            continue
        try:
            score = metric.compute(predictions=[hyp], references=[reference],
                                   use_stemmer=True)['rougeL'].mid.fmeasure
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate while metric failures remain best-effort.
        except Exception:
            score = 0.0
        if score >= max_score:
            max_score = score
    return max_score


def max_rouge_dedu(multi_hypotheses, reference):
    """Return the best ROUGE-L mid F-measure among deduplicated
    hypotheses, scored against a single reference.

    Each non-empty hypothesis may carry a "[idx]" prefix (its index in
    the original beam list); the prefix is stripped before scoring.
    A failed metric computation scores 0.0 (best-effort).
    """
    max_score = 0.0
    for hyp in multi_hypotheses:
        if hyp == "":
            continue
        # Strip the "[idx]" prefix when present.
        if hyp.find(']') != -1:
            hyp = hyp[hyp.index(']') + 1:]
        try:
            score = metric.compute(predictions=[hyp], references=[reference],
                                   use_stemmer=True)['rougeL'].mid.fmeasure
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate while metric failures remain best-effort.
        except Exception:
            score = 0.0
        if score >= max_score:
            max_score = score
    return max_score


# --- Script: score hypothesis subsets with max ROUGE-L and plot the
# distribution of per-example score differences. ---
output_path_beam = '3.csv'  # comparison within the plain beam-search output
output_path_dedu = '4.csv'  # not-chosen-90 vs deduplicated top-10 comparison
# Gold summaries for the test split.
evaluation_labels = pd.read_csv('datasets/PRTiger/with-token/test.csv')
# evaluation_labels['target'] = evaluation_labels['target'].str.strip()
# evaluation_labels['target'] = evaluation_labels['target'].str.lower()
evaluation_labels = evaluation_labels['summary'].tolist()
# res1: per-example beam-search hypotheses (columns sliced 1:101 below);
# res2: deduplicated top-10, each prefixed with its "[idx]" in the beam list.
res1 = pd.read_csv('output/predictions/bart-base-best-metric-rouge-pr-title-withtoken-bs8-586-21/predictions-bs-100.csv')
res2 = pd.read_csv('output/predictions/bart-base-best-metric-rouge-pr-title-withtoken-bs8-586-21/deduplication-rouge/hypothesis-bs-top10-clearsearched-rouge0.6.csv')
# Missing cells become "", which max_rouge/max_rouge_dedu then skip.
res1.fillna("", inplace=True)
res2.fillna("", inplace=True)
res1 = res1.values.tolist()
res2 = res2.values.tolist()

# Comparison 1 (beam search): hypotheses 11..100 (score1) vs top 10 (score2).
score1 = []
for i in range(len(res1)):
    score1.append(max_rouge(res1[i][11:101], evaluation_labels[i]))
    print("[1]{}/4379".format(i))
score1 = pd.DataFrame(score1)
score2 = []
for i in range(len(res2)):
    # NOTE(review): reads res1, not res2, while looping over len(res2) —
    # presumably intentional (top-10 of the same beam file, and both files
    # appear to have one row per test example); confirm.
    score2.append(max_rouge(res1[i][1:11], evaluation_labels[i]))
    print("[2]{}/4379".format(i))
score2 = pd.DataFrame(score2)
score1['res2'] = score2[0]
score1['diff'] = score2[0] - score1[0]
score1.columns = ['res1', 'res2', 'diff']
score1.to_csv(output_path_beam)

# Comparison 2 (deduplication): the beam hypotheses NOT chosen by
# deduplication (score1) vs the deduplicated top 10 (score2).
score1 = []
for i in range(len(res1)):
    chosen_indexs = []
    not_chosen_texts = []
    # res2 cells look like "[idx]text"; collect the chosen beam indices.
    for s in res2[i][1:11]:
        chosen_indexs.append(int(s.split(']')[0].lstrip('[')))
    for idx, s in enumerate(res1[i][1:101]):
        if idx not in chosen_indexs:
            not_chosen_texts.append(s)
    score1.append(max_rouge(not_chosen_texts, evaluation_labels[i]))
    print("[3]{}/4379".format(i))
score1 = pd.DataFrame(score1)
score2 = []
for i in range(len(res2)):
    score2.append(max_rouge_dedu(res2[i][1:11], evaluation_labels[i]))
    print("[4]{}/4379".format(i))
score2 = pd.DataFrame(score2)
score1['res2'] = score2[0]
score1['diff'] = score2[0] - score1[0]
score1.columns = ['res1', 'res2', 'diff']
score1.to_csv(output_path_dedu)
# Reload both result files and bucket the per-example differences.
data = pd.read_csv(output_path_beam)
data_de = pd.read_csv(output_path_dedu)
data['de_diff'] = data_de['diff']
# Remap exact ties (diff == 0.0) to -5.0 so they land in their own
# leftmost [-5.0, -1.0) bin instead of the [0, 0.02) bucket.
data.replace({"diff": {0.0: -5.0}}, inplace=True)
data.replace({"de_diff": {0.0: -5.0}}, inplace=True)
data['de_diff_range'] = pd.cut(x=data['de_diff'], bins=[-5.0, -1.0, -0.8, -0.5, -0.3, -0.2, -0.15, -0.1, -0.08, -0.06, -0.04, -0.02, 0,
                                                  0.02, 0.04, 0.06, 0.08, 0.1, 0.15, 0.2, 0.3, 0.5, 0.8, 1.0], right=False)
data['diff_range'] = pd.cut(x=data['diff'], bins=[-5.0, -1.0, -0.8, -0.5, -0.3, -0.2, -0.15, -0.1, -0.08, -0.06, -0.04, -0.02, 0,
                                                  0.02, 0.04, 0.06, 0.08, 0.1, 0.15, 0.2, 0.3, 0.5, 0.8, 1.0], right=False)
# print(data['diff_range'].value_counts(normalize=True))
# print("[0.0]    {}".format((data['diff']==0.0).sum()/len(data)))
# print(data.sort_values(by='diff', ascending=False).head(100))
# bar = data['diff_range'].value_counts(normalize=True, sort=False).plot.bar()
# Side-by-side bar chart of the two binned difference distributions.
to_plot = pd.DataFrame()
to_plot['beam_search'] = data['diff_range'].value_counts(normalize=True, sort=False)
to_plot['deduplication'] = data['de_diff_range'].value_counts(normalize=True, sort=False)
to_plot.plot.bar()
plt.xticks(rotation=60)
plt.tight_layout()
plt.title('pull-request-title-bart-chosen10&top10-other90')
plt.show()


