import os
import pandas as pd
# from rouge import Rouge
from myrouge.rouge import Rouge
from nltk import word_tokenize


# calculate the rouge metric
def max_rouge(list_of_hypotheses, list_of_references):
    """For each reference, pick the candidate with the highest ROUGE-L F1,
    then print and return the averaged ROUGE-1/2/L F1 over those best picks.

    Args:
        list_of_hypotheses: one list of candidate strings per reference.
            Candidates may carry an index prefix ending in ']' (e.g. "[3] text"),
            which is stripped before scoring; empty candidates are skipped.
        list_of_references: reference strings, aligned with list_of_hypotheses.

    Returns:
        dict: averaged scores as produced by Rouge.get_scores(..., avg=True).
    """
    rouge = Rouge()
    max_hypotheses = []

    for multi_hypotheses, ref in zip(list_of_hypotheses, list_of_references):
        max_score = 0.0
        # Fall back to "" (not None) so the batched get_scores call below
        # never receives None when every candidate of a row is empty.
        # NOTE(review): the rouge library may still reject an empty hypothesis
        # string — confirm if rows with only-empty candidates can occur.
        max_sentence = ""
        for hyp in multi_hypotheses:
            if hyp == "":
                continue
            # Drop the index-number prefix; a candidate without ']' is scored
            # unchanged instead of raising ValueError as str.index would.
            bracket = hyp.find(']')
            if bracket != -1:
                hyp = hyp[bracket + 1:]
            score = rouge.get_scores(hyp, ref)[0]['rouge-l']['f']
            # '>=' keeps the LAST candidate on ties (original behavior).
            if score >= max_score:
                max_score = score
                max_sentence = hyp

        max_hypotheses.append(max_sentence)

    avg_score = rouge.get_scores(max_hypotheses, list_of_references, avg=True)

    def r(x):
        # Report scores as percentages rounded to 4 decimals.
        return round(x * 100, 4)

    print('rouge-1: {:.4}\t\trouge-2: {:.4}\t\trouge-l: {:.4}'.format(r(avg_score['rouge-1']['f']),
                                                                      r(avg_score['rouge-2']['f']),
                                                                      r(avg_score['rouge-l']['f'])))
    return avg_score


# pass the filename and then return the rouge metric
def get_rouge_by_file(predition_file, label_file):
    """Load predictions and gold abstracts from CSV files and report the
    max-ROUGE metrics for the top-1/3/5/10 candidate sets.

    Args:
        predition_file: prediction CSV; column 0 is assumed to be an id/index
            column and columns 1..10 hold up to ten candidate summaries per
            row — TODO confirm against the files being scored.  (The parameter
            keeps its original misspelled name for backward compatibility.)
        label_file: CSV with an 'abstract' column holding the references.

    Returns:
        dict: averaged score dicts from max_rouge, keyed by candidate-set
            size (1, 3, 5, 10).
    """
    evaluation_labels = pd.read_csv(label_file)['abstract'].tolist()

    evaluation_predictions = pd.read_csv(predition_file)
    # Missing candidates become "" and are skipped inside max_rouge.
    evaluation_predictions.fillna("", inplace=True)

    scores = {}
    # Columns 1..k are the top-k candidates (column 0 is skipped).
    for k in (1, 3, 5, 10):
        candidates = evaluation_predictions.iloc[:, 1:k + 1].values.tolist()
        scores[k] = max_rouge(candidates, evaluation_labels)
    return scores
    # max_rouge(evaluation_predictions100, evaluation_labels)


# # calculate single file
# prediction_file = '../output/predictions/bart-base-best-metric-rouge-pull-request-bs8-534-126/clusting/hypothesis-bs-top10-agglomerative-rouge-distmatrix-input-linkage-single.csv'
# print(prediction_file)
# label_file = '../datasets/pull_request/test.pr_commits_20_400_100_0.5_nltk.csv'
# get_rouge_by_file(prediction_file, label_file)

# Calculate the metric for every prediction file in one directory.
prediction_dir = '../output/predictions/bart-base-best-metric-rouge-pull-request-bs8-534-126/dedu-last-rouge/'
label_file = '../datasets/pull_request/test.pr_commits_20_400_100_0.5_nltk.csv'
# sorted() gives a deterministic processing order across platforms.
for file in sorted(os.listdir(prediction_dir)):
    print("\n" + file)
    # os.path.join is robust even if prediction_dir lacks a trailing slash.
    get_rouge_by_file(os.path.join(prediction_dir, file), label_file)
