from itertools import chain
from nltk.corpus import wordnet
from nltk.stem import PorterStemmer

from nltk.translate.meteor_score import single_meteor_score

# Shared Porter stemmer instance used by the stem-matching stage of METEOR.
stemmer = PorterStemmer()


def match_enums(hypo_list, refer_list):
    """Greedily pair identical words between two enumerated word lists.

    Both arguments are lists of ``(position, word)`` tuples. Scanning each
    list from its tail, every hypothesis word is paired with the last
    still-unmatched reference word equal to it; matched entries are removed
    from both lists in place.

    Returns a tuple ``(matches, hypo_list, refer_list)`` where *matches* is a
    list of ``(hypothesis_position, reference_position)`` pairs and the two
    input lists retain only their unmatched entries.
    """
    pairs = []
    for i in reversed(range(len(hypo_list))):
        hypo_pos, hypo_word = hypo_list[i]
        j = len(refer_list) - 1
        while j >= 0:
            if refer_list[j][1] == hypo_word:
                pairs.append((hypo_pos, refer_list[j][0]))
                del hypo_list[i]
                del refer_list[j]
                break
            j -= 1
    return pairs, hypo_list, refer_list


def calculate_meteor_score(hypothesis, reference):
    """Compute the METEOR score of *hypothesis* against *reference*.

    Words are aligned in three successive passes over the still-unmatched
    tokens: exact match on lowercased surface forms, then match on Porter
    stems, then match via single-word WordNet lemma synonyms (of the stems —
    this mirrors NLTK's own pipeline). The harmonic mean (alpha = 0.9) of
    unigram precision and recall is then discounted by a fragmentation
    penalty (gamma = 0.5, beta = 3) based on the number of contiguous match
    chunks. Returns 0.0 when nothing matches or an input is empty.
    """
    hypo_words = list(enumerate(tok.lower() for tok in hypothesis.split()))
    refer_words = list(enumerate(tok.lower() for tok in reference.split()))

    hypo_total = len(hypo_words)
    refer_total = len(refer_words)

    # Pass 1: exact surface-form matches.
    exact_matches, hypo_words, refer_words = match_enums(hypo_words, refer_words)

    # Pass 2: match on Porter stems of whatever is still unmatched.
    hypo_words = [(pos, stemmer.stem(word)) for pos, word in hypo_words]
    refer_words = [(pos, stemmer.stem(word)) for pos, word in refer_words]
    stem_matches, hypo_words, refer_words = match_enums(hypo_words, refer_words)

    # Pass 3: WordNet synonym matches on the remaining (stemmed) words.
    wns_matches = []
    for i in reversed(range(len(hypo_words))):
        pos, word = hypo_words[i]
        # Single-word lemma names of every synset, plus the word itself.
        synonyms = {word}
        for synset in wordnet.synsets(word):
            for lemma in synset.lemmas():
                name = lemma.name()
                if "_" not in name:
                    synonyms.add(name)
        for j in reversed(range(len(refer_words))):
            if refer_words[j][1] in synonyms:
                wns_matches.append((pos, refer_words[j][0]))
                hypo_words.pop(i)
                refer_words.pop(j)
                break

    matches = sorted(exact_matches + stem_matches + wns_matches, key=lambda pair: pair[0])
    match_count = len(matches)

    # A new chunk starts whenever either aligned index fails to advance by 1.
    chunks = 1.0
    for (h_prev, r_prev), (h_next, r_next) in zip(matches, matches[1:]):
        if h_next != h_prev + 1 or r_next != r_prev + 1:
            chunks += 1

    try:
        precision = float(match_count) / hypo_total
        recall = float(match_count) / refer_total
        fmean = (precision * recall) / (0.9 * precision + (1 - 0.9) * recall)
        frag = chunks / match_count
    except ZeroDivisionError:
        # Empty input or zero matches: score is defined as 0.
        return 0.0
    return (1 - 0.5 * frag**3) * fmean

def safe_calculate_meteor_score(hypothesis, reference) -> float | Exception:
    """Like :func:`calculate_meteor_score`, but never raises.

    Any exception raised during scoring is caught and returned to the
    caller as a value instead of propagating.
    """
    try:
        score = calculate_meteor_score(hypothesis, reference)
    except Exception as err:
        return err
    return score

if __name__ == "__main__":
    refer = "Her shirt has medium sleeves. The neckline of it is v-shape. The pants this lady wears is of long length. The pants are with cotton fabric and solid color patterns"
    hypo = 'Her shirt has medium sleeves, cotton fabric and solid color patterns. The neckline of it is v-shape. The pants this lady wears is of long length. The pants are with cotton fabric and solid color patterns'
    score = calculate_meteor_score(hypo, refer)
    official_score = single_meteor_score(refer.split(), hypo.split())
    print(score)
    print(official_score)
