import sys
import os
from nltk.translate.bleu_score import corpus_bleu
import codecs
from collections import OrderedDict
from metrics import embedding_metrics, Embedding
import nltk

# Discourse-marker tokens <D0> ... <D7> that are stripped out of generated text.
discourse_sympol = {f'<D{i}>' for i in range(8)}


def split_sentence(paragraph):
    """Split *paragraph* into sentences with NLTK's pre-trained punkt model.

    The tokenizer is loaded lazily and memoized on the function object, so
    repeated calls (one per story in get_sentence_pair) do not re-resolve the
    pickled resource each time.
    """
    tokenizer = getattr(split_sentence, '_tokenizer', None)
    if tokenizer is None:
        tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
        split_sentence._tokenizer = tokenizer
    return tokenizer.tokenize(paragraph)


def contain_sep(item):
    """Return True when *item* contains the ' <SEP> ' prompt/story delimiter."""
    return item.find(' <SEP> ') != -1


def clear_text(text):
    """Normalise one raw model-output line for metric computation.

    Steps: strip surrounding whitespace; when the module-level ``is_filter``
    flag equals the string 'true', keep only the story part after ' <SEP> ';
    remove every discourse-marker token; cap the text at 250 whitespace
    tokens; finally cut back to the last full stop so no trailing partial
    sentence remains.
    """
    cleaned = text.strip()
    # Reads the module-level is_filter flag (set from sys.argv below).
    if is_filter == 'true':
        cleaned = cleaned.split(' <SEP> ')[1]
    for marker in discourse_sympol:
        cleaned = cleaned.replace(' ' + marker, '')
    cleaned = ' '.join(cleaned.split(' ')[:250])
    if not cleaned.endswith('.'):
        cut = cleaned.rfind('.')
        if cut != -1:
            cleaned = cleaned[:cut + 1]
    return cleaned


def convert_hyp(text):
    """Clean a hypothesis line and return it as a flat list of tokens."""
    return clear_text(text).split(' ')


def convert_ref(text):
    """Clean a reference line and return its tokens wrapped in an outer list
    (the nested shape expected by multi-reference metrics such as BLEU)."""
    tokens = clear_text(text).split(' ')
    return [tokens]


def convert_src(text):
    """Extract the prompt tokens from a '<prompt> <SEP> <story>' line.

    Only the text before the first ' <SEP> ' is kept, and anything from the
    first ' <S> ' marker onwards is discarded as well.
    """
    prompt = text.strip().partition(' <SEP> ')[0]
    # partition is a no-op when ' <S> ' is absent, matching the original guard.
    prompt = prompt.partition(' <S> ')[0]
    return prompt.split(' ')


def get_sentence_pair(stories, prompts):
    """Explode each tokenised story into sentences, pairing every sentence
    with its story's prompt.

    Returns two parallel lists: per-sentence token lists, and the matching
    prompt repeated once for each sentence of its story.
    """
    all_sentences = []
    paired_prompts = []
    for story_tokens, prompt_tokens in zip(stories, prompts):
        for sentence in split_sentence(' '.join(story_tokens)):
            all_sentences.append(sentence.split(' '))
            paired_prompts.append(prompt_tokens)
    return all_sentences, paired_prompts


# ---------------------------------------------------------------------------
# Script entry: compute embedding metrics (average / extrema / greedy) for a
# file of generated hypotheses.
# Usage: python <script> <hyp_path> <true|false>
# ---------------------------------------------------------------------------
hyp_path = sys.argv[1]   # file with one '<prompt> <SEP> <story>' line each
is_filter = sys.argv[2]  # 'true' -> keep only lines containing ' <SEP> '


# 'with' guarantees the file handle is closed (the original leaked it).
with codecs.open(hyp_path, 'r', encoding='utf8') as hyp_file:
    src_hypos = hyp_file.readlines()
print('total number of test example', len(src_hypos))

if is_filter == 'true':
    src_hypos = list(filter(contain_sep, src_hypos))

hypos = list(map(convert_hyp, src_hypos))
srcs = list(map(convert_src, src_hypos))
assert len(hypos) == len(srcs)
print('correct number of test example', len(hypos))
print('prompt')
print(' '.join(srcs[0]))
print('hyp')
print(' '.join(hypos[0]))


# Path to the pre-trained embedding resource consumed by metrics.Embedding.
# NOTE(review): hard-coded user-specific path — consider a CLI argument.
emb_path = '/home/rickwwang/project_research'

hypos_sens, srcs_sens = get_sentence_pair(hypos, srcs)
embedding_array = Embedding(emb_path)
emb_metrics = embedding_metrics(hypos_sens, srcs_sens, embedding_array)

print_emb_metrics = 'emb_avg={:.6f}, emb_ext={:.6f}, emb_gre={:.6f}'.format(*emb_metrics)
print('=' * 50)
print(print_emb_metrics)
print('=' * 50)

# Persist the formatted metrics next to the hypothesis file.
# os.path.join with a single argument was a no-op; plain concatenation suffices.
with open(hyp_path + '.emb_metrics', 'w') as f:
    f.write(print_emb_metrics)