import sys
import os
from nltk.translate.bleu_score import corpus_bleu
import codecs
from collections import OrderedDict


def clear_text(text):
    """Normalize one line: strip surrounding whitespace and keep at most
    the first 250 space-separated tokens, re-joined with single spaces."""
    tokens = text.strip().split(' ')[:250]
    return ' '.join(tokens)


def convert_hyp(text):
    """Turn a hypothesis line into a flat token list (after clear_text)."""
    return clear_text(text).split(' ')


def convert_ref(text):
    """Turn a reference line into the nested list-of-one-reference shape
    that corpus_bleu expects per hypothesis."""
    tokens = clear_text(text).split(' ')
    return [tokens]


def calc_diversity(texts):
    """Compute distinct-1/distinct-2 diversity over verb tokens only.

    A "verb token" is any token immediately preceded by the marker '<V>'
    that is not itself a tag (i.e. contains no '<').

    Args:
        texts: iterable of token lists (one list per generated sentence).

    Returns:
        OrderedDict with keys, in order:
            d_1, d_2   -- % of unique verb unigrams/bigrams among all verb
                          tokens, rounded to 6 decimals (0.0 if no verbs)
            num_d1     -- number of distinct verb unigrams
            num_d2     -- number of distinct verb bigrams
            num_tok    -- total number of verb tokens seen
    """
    unigram = set()
    bigram = set()
    num_tok = 0
    for vec in texts:
        # Adjacent-token pairs; keep the token following each '<V>' marker,
        # skipping anything that is itself a tag.
        pairs = [tuple(vec[i:i + 2]) for i in range(len(vec) - 1)]
        verbs = [t[1] for t in pairs if t[0] == '<V>' and '<' not in t[1]]

        v_len = len(verbs)
        num_tok += v_len
        unigram.update(verbs)
        bigram.update(tuple(verbs[i:i + 2]) for i in range(v_len - 1))

    metrics = OrderedDict()
    if num_tok == 0:
        # Guard: no verb tokens at all would otherwise raise ZeroDivisionError.
        metrics['d_1'] = 0.0
        metrics['d_2'] = 0.0
    else:
        metrics['d_1'] = round(len(unigram) * 1.0 / num_tok * 100, 6)
        metrics['d_2'] = round(len(bigram) * 1.0 / num_tok * 100, 6)
    metrics['num_d1'] = len(unigram)
    metrics['num_d2'] = len(bigram)
    metrics['num_tok'] = num_tok
    return metrics


# CLI: argv[1] = hypothesis file, argv[2] = reference file (one example per line).
hyp_path = sys.argv[1]
tgt_path = sys.argv[2]

# Use context managers so file handles are closed deterministically.
with codecs.open(hyp_path, 'r', encoding='utf8') as f:
    hypos = f.readlines()
with codecs.open(tgt_path, 'r', encoding='utf8') as f:
    refs = f.readlines()

# BUG FIX: in Python 3, map() returns a lazy iterator, so the len() calls and
# indexing below would raise TypeError. Materialize the results into lists.
hypos = [convert_hyp(line) for line in hypos]
refs = [convert_ref(line) for line in refs]
assert len(hypos) == len(refs)
print('number of test example', len(refs))
print('ref')
print(' '.join(refs[0][0]))
print('hyp')
print(' '.join(hypos[0]))

metrics = calc_diversity(hypos)

# Relies on OrderedDict preserving insertion order: d_1, d_2, num_d1, num_d2, num_tok.
print_distinct = 'd1={:.6f}, d2={:.6f}, nd1={:d}, nd2={:d}, ntok={:d}'.format(*metrics.values())
print('=' * 50)
print(print_distinct)
print('=' * 50)

# write to file (os.path.join with a single argument was a no-op; plain concat)
with open(hyp_path + '.distinct_verb', 'w') as f:
    f.write(print_distinct)



