import warnings
from typing import List

import numpy as np
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from nltk.translate.meteor_score import single_meteor_score
from rouge import Rouge

warnings.filterwarnings('ignore')


def batch_join(batch: List[List[str]], empty="EMPTY"):
    """Join each token list into one space-separated string.

    Any entry that ends up with no tokens is replaced by the *empty*
    placeholder; pass a falsy *empty* (e.g. None or "") to keep blank
    strings as-is.
    """
    joined = []
    for tokens in batch:
        text = " ".join(tokens)
        # Whitespace-only joins also count as empty (split() drops them).
        if empty and not text.split():
            text = empty
        joined.append(text)
    return joined


def mean(scores_list):
    """Arithmetic mean of *scores_list* (raises ZeroDivisionError if empty)."""
    count = len(scores_list)
    total = sum(scores_list)
    return total / count


def batch_acc(hypotheses, references):
    """Exact-match accuracy, as a percentage, over paired predictions."""
    assert len(hypotheses) == len(references)
    hits = sum(hyp == ref for hyp, ref in zip(hypotheses, references))
    # Same evaluation order as mean(...) * 100 to keep float results identical.
    return (hits / len(hypotheses)) * 100


def batch_bleu(hypotheses, references, smooth_method=0, n=4, average=True):
    """Sentence-level BLEU over a batch, scaled to 0-100.

    Args:
        hypotheses: list of token lists (model outputs).
        references: list of token lists, paired 1:1 with *hypotheses*
            (each hypothesis is scored against a single reference).
        smooth_method: index of the nltk ``SmoothingFunction`` method
            (0 = no smoothing).
        n: maximum n-gram order; uniform weights of 1/n are used.
        average: if True return the mean score as a float, otherwise a
            list of per-sentence scores.
    """
    assert len(hypotheses) == len(references)
    cc = SmoothingFunction()
    smooth = getattr(cc, 'method' + str(smooth_method))
    weights = [1. / n] * n
    scores = [sentence_bleu([ref], hyp, weights, smoothing_function=smooth)
              for hyp, ref in zip(hypotheses, references)]
    if average:
        return 100 * mean(scores)
    # Bug fix: `100 * scores` on a list replicated the list 100 times
    # instead of scaling each element.
    return [100 * s for s in scores]


def batch_meteor(hypotheses, references, alpha=0.85, beta=0.2, gamma=0.6, average=True):
    """METEOR score over a batch, scaled to 0-100.

    Args:
        hypotheses: list of token lists; each is joined into a single
            space-separated string before scoring (empty ones become
            the "HYP_EMPTY" placeholder).
        references: list of token lists, paired 1:1 with *hypotheses*
            ("REF_EMPTY" placeholder for empty entries).
        alpha, beta, gamma: METEOR parameters forwarded to nltk.
        average: if True return the mean score as a float, otherwise a
            list of per-pair scores.

    NOTE(review): newer nltk releases (>= 3.6.5) expect pre-tokenized
    input for ``single_meteor_score``; passing joined strings only works
    on older nltk — confirm the pinned nltk version.
    """
    assert len(hypotheses) == len(references)
    hypotheses = batch_join(hypotheses, "HYP_EMPTY")
    references = batch_join(references, "REF_EMPTY")
    scores = [single_meteor_score(ref, hyp, alpha=alpha, beta=beta, gamma=gamma)
              for hyp, ref in zip(hypotheses, references)]
    if average:
        return 100 * float(np.mean(scores))
    # Bug fix: `100 * scores` on a list replicated the list 100 times
    # instead of scaling each element.
    return [100 * s for s in scores]


def batch_rouge(hypotheses, references, metrics=None):
    """ROUGE-L F1 over a batch, scaled to 0-100.

    Args:
        hypotheses: list of token lists; joined into strings before
            scoring ("HYP_EMPTY" placeholder for empty entries).
        references: list of token lists, paired 1:1 with *hypotheses*
            ("REF_EMPTY" placeholder for empty entries).
        metrics: ROUGE variants to compute; defaults to ['rouge-l'].
    """
    # Avoid a mutable default argument by resolving None here.
    metrics = ['rouge-l'] if metrics is None else metrics
    assert len(hypotheses) == len(references)
    scorer = Rouge(metrics=metrics, max_n=4)
    hyp_texts = batch_join(hypotheses, "HYP_EMPTY")
    ref_texts = batch_join(references, "REF_EMPTY")
    results = scorer.get_scores(hyp_texts, ref_texts)
    return results['rouge-l']['f'] * 100
