import os
import pandas as pd
from nltk.translate.bleu_score import SmoothingFunction
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.bleu_score import sentence_bleu
from nltk import word_tokenize


def my_corpus_bleu(preds, refs, verbose=False):
    """Compute corpus-level BLEU between predictions and single references.

    preds / refs are parallel lists of whitespace-separated strings; each
    reference is wrapped in a singleton list as nltk's corpus_bleu expects.
    Returns the overall corpus BLEU (default uniform 4-gram weights).
    """
    tokenized_refs = [[r.strip().split()] for r in refs]
    tokenized_preds = [p.strip().split() for p in preds]
    overall = corpus_bleu(tokenized_refs, tokenized_preds)

    if verbose:
        # Per-n-gram BLEU scores (B1..B4), each using a single n-gram weight.
        weight_sets = ((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1))
        ngram_scores = [
            corpus_bleu(tokenized_refs, tokenized_preds, weights=w)
            for w in weight_sets
        ]

        def as_pct(score):
            # Report on a 0-100 scale, rounded to 4 decimals.
            return round(score * 100, 4)

        print('BLEU: {:.4f}\tB1: {:.4f}\tB2: {:.4f}\tB3: {:.4f}\tB4: {:.4f}'.format(
            as_pct(overall), *(as_pct(s) for s in ngram_scores)))

    return overall


# With multi-predictions for one function, pick the prediction with max bleu to calculate the final bleu
# With multi-predictions for one function, pick the prediction with max bleu to calculate the final bleu
def max_bleu(list_of_hypotheses, list_of_references):
    """For each reference, select the candidate with the highest smoothed
    sentence-level BLEU, then print/return corpus BLEU over those best picks.

    list_of_hypotheses: list of candidate lists (strings; each candidate is
        expected to start with an index marker like "[3] ", which is dropped).
    list_of_references: reference strings, parallel to list_of_hypotheses.
    Returns the corpus BLEU score of the selected hypotheses
    (previously returned None; callers that ignore the result are unaffected).
    """
    max_hypotheses = []
    cc = SmoothingFunction()

    for multi_hypotheses, ref in zip(list_of_hypotheses, list_of_references):
        ref_tokens = ref.split()

        best_score = 0.0
        # Fall back to the empty hypothesis so an empty candidate group no
        # longer crashes on ' '.join(None).
        best_tokens = []
        for hyp in multi_hypotheses:
            # Drop the leading index marker (everything up to the first ']').
            # Unlike hyp.index(']'), partition leaves hyp untouched when no
            # ']' exists instead of raising ValueError, and we no longer
            # mutate the caller's candidate list in place.
            _, sep, text = hyp.partition(']')
            hyp_tokens = (text if sep else hyp).split()
            score = sentence_bleu([ref_tokens], hyp_tokens,
                                  smoothing_function=cc.method4)
            # '>=' keeps the original tie-breaking: later candidates win ties.
            if score >= best_score:
                best_score = score
                best_tokens = hyp_tokens

        max_hypotheses.append(' '.join(best_tokens))

    return my_corpus_bleu(max_hypotheses, list_of_references, verbose=True)


# pass the filename and then return the bleu metric
# pass the filename and then return the bleu metric
def get_bleu_by_file(prediction_file, label_file):
    """Load references and ranked candidate predictions from CSV files and
    report max-BLEU for the top-1/3/5/10 candidate sets.

    prediction_file: CSV whose columns 1..N (column 0 is an index) hold the
        ranked candidate predictions for each function.
    label_file: CSV with a 'comment' column containing reference strings.
    """
    labels = pd.read_csv(label_file)['comment'].tolist()

    predictions = pd.read_csv(prediction_file)
    # NaN cells (missing candidates) become empty strings so they stay in
    # the candidate pool and simply score 0 BLEU.
    predictions.fillna("", inplace=True)

    # One evaluation per top-k cutoff; slice 1:k+1 skips the index column.
    for top_k in (1, 3, 5, 10):
        candidates = predictions.iloc[:, 1:top_k + 1].values.tolist()
        max_bleu(candidates, labels)
    # max_bleu(evaluation_predictions100, evaluation_labels)


# # calculate single file
# prediction_file = '../output/predictions/bart-base-best-metric-rouge-code-comment-bs8-77-15/predictions-bs-100.csv'
# label_file = '../datasets/code-comment/test/funcoms.csv'
# print(prediction_file)
# get_bleu_by_file(prediction_file, label_file)

# calculate multi file in one directory
# Evaluate every prediction file in the directory against the shared labels.
prediction_dir = '../output/predictions/bart-base-best-metric-rouge-code-comment-bs8-77-15/deduplication-bleu/'
label_file = '../datasets/code-comment/test/funcoms.csv'
for file in sorted(os.listdir(prediction_dir)):
    print("\n" + file)
    get_bleu_by_file(prediction_dir + file, label_file)




